hip_filename (string, lengths 5-84) | hip_content (string, lengths 79-9.69M) | cuda_filename (string, lengths 4-83) | cuda_content (string, lengths 19-9.69M)
---|---|---|---|
23df5abf4619578c4016b7ce81bf973eb55708c0.hip | // !!! This is a file automatically generated by hipify!!!
#include <lsf_pipelines.h>
static int no_of_pipelines = 2;
/* arrays, each element belongs to one pipeline each */
vector<hipStream_t> lsf_stream;
static vector<cudnnHandle_t> cudnnHandles;
static vector<hipblasHandle_t> cublasHandles;
void lsf_initialize() {
for (int i = 0; i < no_of_pipelines; i++) {
lsf_stream.push_back(hipStream_t());
cudnnHandles.push_back(cudnnHandle_t());
cublasHandles.push_back(hipblasHandle_t());
hipStreamCreate(&lsf_stream[i]);
cudnnCreate(&cudnnHandles[i]);
cudnnSetStream(cudnnHandles[i], lsf_stream[i]);
hipblasCreate(&cublasHandles[i]);
hipblasSetStream(cublasHandles[i], lsf_stream[i]);
}
}
void lsf_dispatch(Operation *tp, int index) {
assert(index < no_of_pipelines);
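// Pick the stream and library handles assigned to this pipeline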
hipStream_t &compute_stream = lsf_stream[index];
cudnnHandle_t &cudnn_handle = cudnnHandles[index];
hipblasHandle_t &cublas_handle = cublasHandles[index];
int i = tp->op_layer - 1;
NeuralNet *nm = tp->model;
CnmemSpace space_tracker(nm->free_bytes); // need updates here
//-- std::cout << "here\n";
//-- std::cout << "Free bytes: " << nm->free_bytes << std::endl;
if (i == 0) { // this is the first layer, load and resize image as per
// current inference pipeline
/* image im = load_image_color(nm->imgfname, 0, 0);
//size? net->w in yolo
image r = letterbox_image(im,nm->input_w, nm->input_h );
//resize_network(net, resized.w, resized.h);
show_image(im,"orig",5);
show_image(r,"letterimg",5);
//copy image data into layer_input[0]
//memcpy(&(nm->layer_input[i]),r.data,nm->layer_input_size[i]*nm->data_type_size);
nm->lockedcnmemMalloc(&(nm->layer_input[0]), nm->layer_input_size[0] *
nm->data_type_size, NULL);*/
space_tracker.updateSpace(CnmemSpace::SUB,
nm->layer_input_size[0] * nm->data_type_size);
// checkCudaErrors(hipMemcpy(nm->layer_input[0], r.data, nm->batch_size
// * nm->input_channels * nm->input_h * nm->input_w *
// nm->data_type_size, hipMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
size_t cur_workspace_size;
void *cur_workspace;
// testing
/* hipEvent_t s,e;
float mss;
hipEventCreate(&s);
hipEventCreate(&e);
hipEventRecord(s, nm->stream_compute);
*/
nm->lockedcnmemMalloc(&(nm->layer_input[i + 1]),
nm->layer_input_size[i + 1] * nm->data_type_size,
NULL);
/* hipEventRecord(e, nm->stream_compute);
hipEventSynchronize(e);
hipEventElapsedTime(&mss,s,e);
printf("%f :",mss);
*/
space_tracker.updateSpace(CnmemSpace::SUB,
nm->layer_input_size[i + 1] * nm->data_type_size);
if (nm->layer_type[i] == CONV) {
tp->type = 'C';
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)nm->params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
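// Allocate the workspace needed by the chosen forward-convolution algorithm (fwd_algo)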
nm->lockedcnmemMalloc(&cur_workspace, cur_workspace_size, NULL);
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, nm->layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
// custom coarsened cuda kernel
// customCoarsenedConvolutionForward((float *)nm->layer_input[i], (float
// *)nm->layer_input[i + 1], cur_params->conv_desc,
// cur_params->filter_desc, cur_params->input_tensor, (float
// *)cur_params->W, compute_stream);
// Batch Normalization
/* if (cur_params->bn == 1)
{
normalize_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->rolling_mean_gpu, (float
*)cur_params->rolling_variance_gpu, 1, cur_params->C_out,
cur_params->output_h * cur_params->output_w, compute_stream);
scale_bias_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->scales_gpu, 1, cur_params->C_out, cur_params->output_h *
cur_params->output_w, compute_stream); add_bias_gpu((float
*)nm->layer_input[i + 1], (float *)cur_params->b, 1, cur_params->C_out,
cur_params->output_h * cur_params->output_w, compute_stream);
}
else
{
add_bias_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->b, 1, cur_params->C_out, cur_params->output_h *
cur_params->output_w, compute_stream);
} */
checkCUDNN(cudnnAddTensor(
cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
// Replacing the cuDNN ReLU call with a custom leaky ReLU call
// float *addr = (float *)(nm->layer_input[i + 1]);
// activate_array_gpu(addr, nm->layer_input_size[i + 1],
// compute_stream);
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
} else if (nm->layer_type[i] == FULLY_CONNECTED) {
tp->type = 'F';
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)nm->params[i];
// std::cout << "FChere" << i << std::endl;
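// The FC forward pass is computed as two GEMMs: Y = W * X (C_out x batch),
// then a rank-1 GEMM with the all-ones vector one_vec that broadcast-adds the bias b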
if (nm->data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
nm->batch_size, cur_params->C_in, &Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)nm->layer_input[i], cur_params->C_in, &Sbeta,
(float *)nm->layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasSgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
nm->batch_size, 1, &Salpha, (float *)cur_params->b,
cur_params->C_out, (float *)nm->one_vec, 1, &Salpha,
(float *)nm->layer_input[i + 1], cur_params->C_out));
} else if (nm->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
nm->batch_size, cur_params->C_in, &Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)nm->layer_input[i], cur_params->C_in, &Dbeta,
(double *)nm->layer_input[i + 1], cur_params->C_out));
checkCUBLAS(hipblasDgemm(
cublas_handle, HIPBLAS_OP_N, HIPBLAS_OP_N, cur_params->C_out,
nm->batch_size, 1, &Dalpha, (double *)cur_params->b,
cur_params->C_out, (double *)nm->one_vec, 1, &Dalpha,
(double *)nm->layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
// Replacing the cuDNN ReLU activation call with a custom leaky ReLU call
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
// activate_array_gpu((float *)nm->layer_input[i + 1],
// nm->layer_input_size[i + 1], compute_stream);
}
// std::cout << "FChere" << i << std::endl;
}
else if (nm->layer_type[i] == DROPOUT) {
tp->type = 'D';
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)nm->params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
nm->layer_input[i], cur_params->input_tensor,
nm->layer_input[i + 1], cur_params->reserved_space,
cur_params->reserved_space_size));
} else if (nm->layer_type[i] == BATCHNORM) {
tp->type = 'B';
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params =
(BatchNormLayerParams *)nm->params[i];
checkCUDNN(cudnnBatchNormalizationForwardInference(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, nm->layer_input[i],
cur_params->input_tensor, nm->layer_input[i + 1],
cur_params->sbmv_desc, cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
} else if (nm->layer_type[i] == POOLING) {
tp->type = 'P';
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)nm->params[i];
checkCUDNN(cudnnPoolingForward(
cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, nm->layer_input[i], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
} else if (nm->layer_type[i] == ACTV) {
tp->type = 'A';
ActivationLayerParams *cur_params =
(ActivationLayerParams *)nm->params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, nm->layer_input[i], &beta,
cur_params->input_tensor, nm->layer_input[i + 1]));
} else if (nm->layer_type[i] == REGION) { // Processing of region layer
tp->type = 'R';
// printf("Processing region layer %d",i);
// printf("Input layer size is %d output layer size is %d\n",
// nm->layer_input_size[i], nm->layer_input_size[i+1]);
RegionLayerParams *cur_params = (RegionLayerParams *)nm->params[i];
// printf("Batch size is %d\n", cur_params->batch_size);
forward_region_layer_gpu(
nm->layer_input_size[i], nm->layer_input_size[i + 1],
(float *)nm->layer_input[i], cur_params->batch_size,
cur_params->height, cur_params->width, cur_params->num,
cur_params->classes, cur_params->coords,
(float *)nm->layer_input[i + 1], compute_stream);
float *result =
(float *)malloc(nm->layer_input_size[i + 1] * sizeof(float));
checkCudaErrors(hipMemcpy(result, nm->layer_input[i + 1],
nm->layer_input_size[i + 1] * sizeof(float),
hipMemcpyDeviceToHost));
// int nbox=0;
// newly added block
//--detection *dets = make_network_boxes(cur_params,0.5, &nbox);
//--fill_network_boxes(cur_params,nm->img_w,nm->img_h, 0.5,0, dets,
// result, nm->layer_input_size[i+1], nm->input_w, nm->input_h);
// print_detector_detections(fps, id, dets, num, classes, w, h);
//----list *options = read_data_cfg("cfg/coco.data");
// char *name_list = option_find_str(options, "names",
// "data/names.list");
//----char *name_list = option_find_str(options, "names",
//"data/coco.names");
//--char **names = get_labels("data/coco.names");
//--image **alphabet = load_alphabet();
//--draw_detections(nm->im, dets, nbox, 0.5, names, alphabet,
// cur_params->classes);
//--save_image(nm->im, "predictions");
//--free_detections(dets, nbox);
} else if (nm->layer_type[i] == SOFTMAX) {
tp->type = 'S';
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)nm->params[i];
// custom kernel call for softmax
softmax_gpu((float *)nm->layer_input[i], cur_params->channels,
cur_params->channels,
(nm->layer_input_size[i]) / cur_params->channels,
(cur_params->w) * (cur_params->h), 1,
(cur_params->w) * (cur_params->h), 1,
(float *)nm->layer_input[i + 1], compute_stream);
// cuDNN kernel call for Softmax
/*checkCUDNN(cudnnSoftmaxForward(nm->cudnn_handle, cur_params->algo,
cur_params->mode, &alpha, cur_params->input_tensor,
nm->layer_input[i], &beta, cur_params->input_tensor,
nm->layer_input[i + 1]));*/
//-Copy the result produced by softmax layer from GPU to CPU
// checkCudaErrors(hipStreamSynchronize(nm->stream_compute));
// /////-----check....
float *result =
(float *)malloc(nm->layer_input_size[i + 1] * sizeof(float));
checkCudaErrors(hipMemcpy(result, nm->layer_input[i + 1],
nm->layer_input_size[i + 1] * sizeof(float),
hipMemcpyDeviceToHost));
// Infer the output class
// int *correct_count=0;
// nm->compareOutputCorrect(correct_count,nm->y);
// checkCNMEM(cnmemFree(nm->layer_input[nm->num_layers - 1],
// NULL)); space_tracker.updateSpace(CnmemSpace::ADD,
// nm->layer_input_size[nm->num_layers - 1] * nm->data_type_size);
//--
int top = 5;
list *options =
read_data_cfg("data/imagenet1k.data"); // specify name of the file
char *name_list = option_find_str(options, "names", 0);
if (!name_list)
name_list = option_find_str(options, "labels", "data/labels.list");
if (top == 0)
top = option_find_int(options, "top", 1);
int ii = 0;
char **names = get_labels(name_list);
// clock_t time;
int *indexes = (int *)calloc(top, sizeof(int));
// time=clock();
top_k(result, nm->layer_input_size[i + 1], top,
indexes); // check parameters of this function
// fprintf(stderr, "%s: Predicted in %f seconds.\n", input,
// sec(clock()-time));
for (ii = 0; ii < top; ++ii) {
// int index = indexes[ii];
// if(net->hierarchy) printf("%d, %s: %f, parent: %s \n",index,
// names[index], predictions[index], (net->hierarchy->parent[index]
// >= 0) ? names[net->hierarchy->parent[index]] : "Root"); else
// printf("%s: %f\n",names[index], predictions[index]);
// printf("index is %d: %5.2f%%: %s\n",index, result[index]*100,
// names[index]); printf("index is %d: %s\n",index, names[index]);
}
}
if (nm->layer_type[i] == CONV) {
nm->lockedcnmemFree(cur_workspace, NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
// kCudaErrors(hipStreamSynchronize(nm->stream_compute));
// free the memory allocated to layer_input[i]
// nm->lockedcnmemFree(nm->layer_input[i], NULL);
// space_tracker.updateSpace(CnmemSpace::ADD, nm->layer_input_size[i] *
// nm->data_type_size);
}
| 23df5abf4619578c4016b7ce81bf973eb55708c0.cu | #include <lsf_pipelines.h>
static int no_of_pipelines = 2;
/* arrays, each element belongs to one pipeline each */
vector<cudaStream_t> lsf_stream;
static vector<cudnnHandle_t> cudnnHandles;
static vector<cublasHandle_t> cublasHandles;
void lsf_initialize() {
for (int i = 0; i < no_of_pipelines; i++) {
lsf_stream.push_back(cudaStream_t());
cudnnHandles.push_back(cudnnHandle_t());
cublasHandles.push_back(cublasHandle_t());
cudaStreamCreate(&lsf_stream[i]);
cudnnCreate(&cudnnHandles[i]);
cudnnSetStream(cudnnHandles[i], lsf_stream[i]);
cublasCreate(&cublasHandles[i]);
cublasSetStream(cublasHandles[i], lsf_stream[i]);
}
}
void lsf_dispatch(Operation *tp, int index) {
assert(index < no_of_pipelines);
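// Pick the stream and library handles assigned to this pipeline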
cudaStream_t &compute_stream = lsf_stream[index];
cudnnHandle_t &cudnn_handle = cudnnHandles[index];
cublasHandle_t &cublas_handle = cublasHandles[index];
int i = tp->op_layer - 1;
NeuralNet *nm = tp->model;
CnmemSpace space_tracker(nm->free_bytes); // need updates here
//-- std::cout << "here\n";
//-- std::cout << "Free bytes: " << nm->free_bytes << std::endl;
if (i == 0) { // this is the first layer, load and resize image as per
// current inference pipeline
/* image im = load_image_color(nm->imgfname, 0, 0);
//size? net->w in yolo
image r = letterbox_image(im,nm->input_w, nm->input_h );
//resize_network(net, resized.w, resized.h);
show_image(im,"orig",5);
show_image(r,"letterimg",5);
//copy image data into layer_input[0]
//memcpy(&(nm->layer_input[i]),r.data,nm->layer_input_size[i]*nm->data_type_size);
nm->lockedcnmemMalloc(&(nm->layer_input[0]), nm->layer_input_size[0] *
nm->data_type_size, NULL);*/
space_tracker.updateSpace(CnmemSpace::SUB,
nm->layer_input_size[0] * nm->data_type_size);
// checkCudaErrors(cudaMemcpy(nm->layer_input[0], r.data, nm->batch_size
// * nm->input_channels * nm->input_h * nm->input_w *
// nm->data_type_size, cudaMemcpyHostToDevice));
}
float alpha = 1.0, beta = 0.0;
float Salpha = 1.0, Sbeta = 0.0;
double Dalpha = 1.0, Dbeta = 0.0;
size_t cur_workspace_size;
void *cur_workspace;
// testing
/* cudaEvent_t s,e;
float mss;
cudaEventCreate(&s);
cudaEventCreate(&e);
cudaEventRecord(s, nm->stream_compute);
*/
nm->lockedcnmemMalloc(&(nm->layer_input[i + 1]),
nm->layer_input_size[i + 1] * nm->data_type_size,
NULL);
/* cudaEventRecord(e, nm->stream_compute);
cudaEventSynchronize(e);
cudaEventElapsedTime(&mss,s,e);
printf("%f :",mss);
*/
space_tracker.updateSpace(CnmemSpace::SUB,
nm->layer_input_size[i + 1] * nm->data_type_size);
if (nm->layer_type[i] == CONV) {
tp->type = 'C';
// std::cout << "conv\n";
ConvLayerParams *cur_params = (ConvLayerParams *)nm->params[i];
cur_workspace_size = cur_params->fwd_workspace_size;
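// Allocate the workspace needed by the chosen forward-convolution algorithm (fwd_algo)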
nm->lockedcnmemMalloc(&cur_workspace, cur_workspace_size, NULL);
// computation
checkCUDNN(cudnnConvolutionForward(
cudnn_handle, &alpha, cur_params->input_tensor, nm->layer_input[i],
cur_params->filter_desc, cur_params->W, cur_params->conv_desc,
cur_params->fwd_algo, cur_workspace, cur_workspace_size, &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
// custom coarsened cuda kernel
// customCoarsenedConvolutionForward((float *)nm->layer_input[i], (float
// *)nm->layer_input[i + 1], cur_params->conv_desc,
// cur_params->filter_desc, cur_params->input_tensor, (float
// *)cur_params->W, compute_stream);
// Batch Normalization
/* if (cur_params->bn == 1)
{
normalize_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->rolling_mean_gpu, (float
*)cur_params->rolling_variance_gpu, 1, cur_params->C_out,
cur_params->output_h * cur_params->output_w, compute_stream);
scale_bias_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->scales_gpu, 1, cur_params->C_out, cur_params->output_h *
cur_params->output_w, compute_stream); add_bias_gpu((float
*)nm->layer_input[i + 1], (float *)cur_params->b, 1, cur_params->C_out,
cur_params->output_h * cur_params->output_w, compute_stream);
}
else
{
add_bias_gpu((float *)nm->layer_input[i + 1], (float
*)cur_params->b, 1, cur_params->C_out, cur_params->output_h *
cur_params->output_w, compute_stream);
} */
checkCUDNN(cudnnAddTensor(
cudnn_handle, &alpha, cur_params->bias_desc, cur_params->b, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1]));
// if activation required
if (cur_params->activation_mode != ACTIVATION_NONE) {
// Replacing the cuDNN ReLU call with a custom leaky ReLU call
// float *addr = (float *)(nm->layer_input[i + 1]);
// activate_array_gpu(addr, nm->layer_input_size[i + 1],
// compute_stream);
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
}
space_tracker.updateSpace(CnmemSpace::SUB, cur_workspace_size);
// std::cout << "Free bytes: " << free_bytes << std::endl;
} else if (nm->layer_type[i] == FULLY_CONNECTED) {
tp->type = 'F';
// std::cout << "FC\n";
FCLayerParams *cur_params = (FCLayerParams *)nm->params[i];
// std::cout << "FChere" << i << std::endl;
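// The FC forward pass is computed as two GEMMs: Y = W * X (C_out x batch),
// then a rank-1 GEMM with the all-ones vector one_vec that broadcast-adds the bias b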
if (nm->data_type == CUDNN_DATA_FLOAT) {
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
nm->batch_size, cur_params->C_in, &Salpha,
(float *)cur_params->W, cur_params->C_out,
(float *)nm->layer_input[i], cur_params->C_in, &Sbeta,
(float *)nm->layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasSgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
nm->batch_size, 1, &Salpha, (float *)cur_params->b,
cur_params->C_out, (float *)nm->one_vec, 1, &Salpha,
(float *)nm->layer_input[i + 1], cur_params->C_out));
} else if (nm->data_type == CUDNN_DATA_DOUBLE) {
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
nm->batch_size, cur_params->C_in, &Dalpha,
(double *)cur_params->W, cur_params->C_out,
(double *)nm->layer_input[i], cur_params->C_in, &Dbeta,
(double *)nm->layer_input[i + 1], cur_params->C_out));
checkCUBLAS(cublasDgemm(
cublas_handle, CUBLAS_OP_N, CUBLAS_OP_N, cur_params->C_out,
nm->batch_size, 1, &Dalpha, (double *)cur_params->b,
cur_params->C_out, (double *)nm->one_vec, 1, &Dalpha,
(double *)nm->layer_input[i + 1], cur_params->C_out));
}
if (cur_params->activation_mode != ACTIVATION_NONE) {
// Replacing the cuDNN ReLU activation call with a custom leaky ReLU call
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->output_tensor, nm->layer_input[i + 1], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
// activate_array_gpu((float *)nm->layer_input[i + 1],
// nm->layer_input_size[i + 1], compute_stream);
}
// std::cout << "FChere" << i << std::endl;
}
else if (nm->layer_type[i] == DROPOUT) {
tp->type = 'D';
// std::cout << "Dropout\n";
DropoutLayerParams *cur_params = (DropoutLayerParams *)nm->params[i];
checkCUDNN(cudnnDropoutForward(
cudnn_handle, cur_params->dropout_desc, cur_params->input_tensor,
nm->layer_input[i], cur_params->input_tensor,
nm->layer_input[i + 1], cur_params->reserved_space,
cur_params->reserved_space_size));
} else if (nm->layer_type[i] == BATCHNORM) {
tp->type = 'B';
// std::cout << "Batchnorm\n";
BatchNormLayerParams *cur_params =
(BatchNormLayerParams *)nm->params[i];
checkCUDNN(cudnnBatchNormalizationForwardInference(
cudnn_handle, cur_params->mode, &alpha, &beta,
cur_params->input_tensor, nm->layer_input[i],
cur_params->input_tensor, nm->layer_input[i + 1],
cur_params->sbmv_desc, cur_params->scale, cur_params->bias,
cur_params->running_mean, cur_params->running_variance,
cur_params->epsilon));
} else if (nm->layer_type[i] == POOLING) {
tp->type = 'P';
// std::cout << "Pooling\n";
PoolingLayerParams *cur_params = (PoolingLayerParams *)nm->params[i];
checkCUDNN(cudnnPoolingForward(
cudnn_handle, cur_params->pool_desc, &alpha,
cur_params->input_tensor, nm->layer_input[i], &beta,
cur_params->output_tensor, nm->layer_input[i + 1]));
} else if (nm->layer_type[i] == ACTV) {
tp->type = 'A';
ActivationLayerParams *cur_params =
(ActivationLayerParams *)nm->params[i];
checkCUDNN(cudnnActivationForward(
cudnn_handle, cur_params->actv_desc, &alpha,
cur_params->input_tensor, nm->layer_input[i], &beta,
cur_params->input_tensor, nm->layer_input[i + 1]));
} else if (nm->layer_type[i] == REGION) { // Processing of region layer
tp->type = 'R';
// printf("Processing region layer %d",i);
// printf("Input layer size is %d output layer size is %d\n",
// nm->layer_input_size[i], nm->layer_input_size[i+1]);
RegionLayerParams *cur_params = (RegionLayerParams *)nm->params[i];
// printf("Batch size is %d\n", cur_params->batch_size);
forward_region_layer_gpu(
nm->layer_input_size[i], nm->layer_input_size[i + 1],
(float *)nm->layer_input[i], cur_params->batch_size,
cur_params->height, cur_params->width, cur_params->num,
cur_params->classes, cur_params->coords,
(float *)nm->layer_input[i + 1], compute_stream);
float *result =
(float *)malloc(nm->layer_input_size[i + 1] * sizeof(float));
checkCudaErrors(cudaMemcpy(result, nm->layer_input[i + 1],
nm->layer_input_size[i + 1] * sizeof(float),
cudaMemcpyDeviceToHost));
// int nbox=0;
// newly added block
//--detection *dets = make_network_boxes(cur_params,0.5, &nbox);
//--fill_network_boxes(cur_params,nm->img_w,nm->img_h, 0.5,0, dets,
// result, nm->layer_input_size[i+1], nm->input_w, nm->input_h);
// print_detector_detections(fps, id, dets, num, classes, w, h);
//----list *options = read_data_cfg("cfg/coco.data");
// char *name_list = option_find_str(options, "names",
// "data/names.list");
//----char *name_list = option_find_str(options, "names",
//"data/coco.names");
//--char **names = get_labels("data/coco.names");
//--image **alphabet = load_alphabet();
//--draw_detections(nm->im, dets, nbox, 0.5, names, alphabet,
// cur_params->classes);
//--save_image(nm->im, "predictions");
//--free_detections(dets, nbox);
} else if (nm->layer_type[i] == SOFTMAX) {
tp->type = 'S';
SoftmaxLayerParams *cur_params = (SoftmaxLayerParams *)nm->params[i];
// custom kernel call for softmax
softmax_gpu((float *)nm->layer_input[i], cur_params->channels,
cur_params->channels,
(nm->layer_input_size[i]) / cur_params->channels,
(cur_params->w) * (cur_params->h), 1,
(cur_params->w) * (cur_params->h), 1,
(float *)nm->layer_input[i + 1], compute_stream);
// cuDNN kernel call for Softmax
/*checkCUDNN(cudnnSoftmaxForward(nm->cudnn_handle, cur_params->algo,
cur_params->mode, &alpha, cur_params->input_tensor,
nm->layer_input[i], &beta, cur_params->input_tensor,
nm->layer_input[i + 1]));*/
//-Copy the result produced by softmax layer from GPU to CPU
// checkCudaErrors(cudaStreamSynchronize(nm->stream_compute));
// /////-----check....
float *result =
(float *)malloc(nm->layer_input_size[i + 1] * sizeof(float));
checkCudaErrors(cudaMemcpy(result, nm->layer_input[i + 1],
nm->layer_input_size[i + 1] * sizeof(float),
cudaMemcpyDeviceToHost));
// Infer the output class
// int *correct_count=0;
// nm->compareOutputCorrect(correct_count,nm->y);
// checkCNMEM(cnmemFree(nm->layer_input[nm->num_layers - 1],
// NULL)); space_tracker.updateSpace(CnmemSpace::ADD,
// nm->layer_input_size[nm->num_layers - 1] * nm->data_type_size);
//--
int top = 5;
list *options =
read_data_cfg("data/imagenet1k.data"); // specify name of the file
char *name_list = option_find_str(options, "names", 0);
if (!name_list)
name_list = option_find_str(options, "labels", "data/labels.list");
if (top == 0)
top = option_find_int(options, "top", 1);
int ii = 0;
char **names = get_labels(name_list);
// clock_t time;
int *indexes = (int *)calloc(top, sizeof(int));
// time=clock();
top_k(result, nm->layer_input_size[i + 1], top,
indexes); // check parameters of this function
// fprintf(stderr, "%s: Predicted in %f seconds.\n", input,
// sec(clock()-time));
for (ii = 0; ii < top; ++ii) {
// int index = indexes[ii];
// if(net->hierarchy) printf("%d, %s: %f, parent: %s \n",index,
// names[index], predictions[index], (net->hierarchy->parent[index]
// >= 0) ? names[net->hierarchy->parent[index]] : "Root"); else
// printf("%s: %f\n",names[index], predictions[index]);
// printf("index is %d: %5.2f%%: %s\n",index, result[index]*100,
// names[index]); printf("index is %d: %s\n",index, names[index]);
}
}
if (nm->layer_type[i] == CONV) {
nm->lockedcnmemFree(cur_workspace, NULL);
space_tracker.updateSpace(CnmemSpace::ADD, cur_workspace_size);
}
// kCudaErrors(cudaStreamSynchronize(nm->stream_compute));
// free the memory allocated to layer_input[i]
// nm->lockedcnmemFree(nm->layer_input[i], NULL);
// space_tracker.updateSpace(CnmemSpace::ADD, nm->layer_input_size[i] *
// nm->data_type_size);
}
|
07561880e42b69ce4395b50b9ef33b39ff1a39b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <call_kernel.h>
//xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset] to A[N-1] will always receive unpredictable values,
//because they access memory positions that were not initialized
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#define tid threadIdx.x
#define N 2//1024
__device__ inline void inlined(int *A, int offset)
{
int temp = A[tid + offset];
A[tid] += temp;
}
__global__ void inline_test(int *A, int offset) {
inlined(A, offset);
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
hipMalloc((void**)&dev_a, size);
a = (int*)malloc(N*size);
for (int i = 0; i < N; i++)
a[i] = i;
hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice);
printf("a: ");
//for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
//inline_test<<<1,N>>>(dev_a, 2); //you can change this offset for tests
ESBMC_verify_kernel_intt(inline_test, 1, N, dev_a, 2);
hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost);
printf("\nFunction Results:\n ");
//for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
free(a);
hipFree(dev_a);
return 0;
}
| 07561880e42b69ce4395b50b9ef33b39ff1a39b1.cu | #include <call_kernel.h>
//xfail:BOOGIE_ERROR
//main.cu: error: possible read-write race
//however, this didn't happen in the tests
//although in CUDA providing the inline keyword should still keep a copy of the function around,
//this kind of access is considered an error by ESBMC
//ps: the values from A[N-1-offset] to A[N-1] will always receive unpredictable values,
//because they access memory positions that were not initialized
#include <stdio.h>
#include <cuda.h>
#include <cuda_runtime_api.h>
#define tid threadIdx.x
#define N 2//1024
__device__ inline void inlined(int *A, int offset)
{
int temp = A[tid + offset];
A[tid] += temp;
}
__global__ void inline_test(int *A, int offset) {
inlined(A, offset);
}
int main(){
int *a;
int *dev_a;
int size = N*sizeof(int);
cudaMalloc((void**)&dev_a, size);
a = (int*)malloc(N*size);
for (int i = 0; i < N; i++)
a[i] = i;
cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice);
printf("a: ");
//for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
//inline_test<<<1,N>>>(dev_a, 2); //you can change this offset for tests
ESBMC_verify_kernel_intt(inline_test, 1, N, dev_a, 2);
cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost);
printf("\nFunction Results:\n ");
//for (int i = 0; i < N; i++)
// printf("%d ", a[i]);
free(a);
cudaFree(dev_a);
return 0;
}
|
ef9f91bcc3f71c28e4b24dc6bc48c7f955fda9fc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "matvec.h"
#include "constants.h"
__global__ void penaltyComputeKernel(double *d_pnlBtmMat, double *d_lmkBtmMat, double *d_vlcMat,
int *d_btmVtxMat, int lmkNum, int btmElmNum)
{
int btmElmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( btmElmIdx < btmElmNum )
{
vector q0Vec, q1Vec;
getBoundary(q0Vec, q1Vec, d_lmkBtmMat, btmElmIdx, btmElmNum);
int q0Idx = d_btmVtxMat[ btmElmIdx];
int q1Idx = d_btmVtxMat[btmElmNum + btmElmIdx];
vector v0Vec, v1Vec;
getVector(v0Vec, d_vlcMat, q0Idx, lmkNum);
getVector(v1Vec, d_vlcMat, q1Idx, lmkNum);
vector tanVec;
vectorSubtract(tanVec, q1Vec, q0Vec);
double tanLen = eucnorm(tanVec);
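// Each endpoint's penalty contribution is its velocity scaled by the element
// length tanLen and by 1/(VTXNUM - 1)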
vector dv0PVec, dv1PVec;
dv0PVec.x = v0Vec.x * tanLen / (VTXNUM - 1.0);
dv0PVec.y = v0Vec.y * tanLen / (VTXNUM - 1.0);
dv1PVec.x = v1Vec.x * tanLen / (VTXNUM - 1.0);
dv1PVec.y = v1Vec.y * tanLen / (VTXNUM - 1.0);
setBoundary(d_pnlBtmMat, dv0PVec, dv1PVec, btmElmIdx, btmElmNum);
}
return;
}
__global__ void penaltyGatherKernel(double *d_pnlMat, double *d_pnlBtmMat, int *d_vtxBtmMat,
int btmElmNum, int lmkNum, int btmLmkNum)
{
int btmLmkIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( btmLmkIdx < btmLmkNum )
{
vector pnlVec = {0.0, 0.0};
int adjNum = d_vtxBtmMat[btmLmkIdx];
for ( int adjIdx = 0; adjIdx < adjNum; ++adjIdx )
{
int btmElmIdx = d_vtxBtmMat[(1 + 2 * adjIdx ) * btmLmkNum + btmLmkIdx];
int lclIdx = d_vtxBtmMat[(1 + 2 * adjIdx + 1) * btmLmkNum + btmLmkIdx];
vector pnlBtmVec;
getVector(pnlBtmVec, d_pnlBtmMat + lclIdx * btmElmNum * DIMNUM, btmElmIdx, btmElmNum);
vectorSum(pnlVec, pnlVec, pnlBtmVec);
}
setVector(d_pnlMat, pnlVec, btmLmkIdx, lmkNum);
}
return;
}
void applyPenaltyOperator(double *d_pnlMat, double *d_lmkBtmMat, double *d_vlcMat,
double *d_pnlBtmMat, int *d_btmVtxMat, int *d_vtxBtmMat,
int lmkNum, int btmElmNum, int btmLmkNum)
{
int blkNum = (btmElmNum - 1) / BLKDIM + 1;
hipLaunchKernelGGL(( penaltyComputeKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_pnlBtmMat, d_lmkBtmMat, d_vlcMat,
d_btmVtxMat, lmkNum, btmElmNum);
blkNum = (btmLmkNum - 1) / BLKDIM + 1;
hipMemset(d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM);
hipLaunchKernelGGL(( penaltyGatherKernel) , dim3(blkNum), dim3(BLKDIM), 0, 0, d_pnlMat, d_pnlBtmMat,
d_vtxBtmMat, btmElmNum, lmkNum, btmLmkNum);
return;
}
| ef9f91bcc3f71c28e4b24dc6bc48c7f955fda9fc.cu | #include "matvec.h"
#include "constants.h"
__global__ void penaltyComputeKernel(double *d_pnlBtmMat, double *d_lmkBtmMat, double *d_vlcMat,
int *d_btmVtxMat, int lmkNum, int btmElmNum)
{
int btmElmIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( btmElmIdx < btmElmNum )
{
vector q0Vec, q1Vec;
getBoundary(q0Vec, q1Vec, d_lmkBtmMat, btmElmIdx, btmElmNum);
int q0Idx = d_btmVtxMat[ btmElmIdx];
int q1Idx = d_btmVtxMat[btmElmNum + btmElmIdx];
vector v0Vec, v1Vec;
getVector(v0Vec, d_vlcMat, q0Idx, lmkNum);
getVector(v1Vec, d_vlcMat, q1Idx, lmkNum);
vector tanVec;
vectorSubtract(tanVec, q1Vec, q0Vec);
double tanLen = eucnorm(tanVec);
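// Each endpoint's penalty contribution is its velocity scaled by the element
// length tanLen and by 1/(VTXNUM - 1)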
vector dv0PVec, dv1PVec;
dv0PVec.x = v0Vec.x * tanLen / (VTXNUM - 1.0);
dv0PVec.y = v0Vec.y * tanLen / (VTXNUM - 1.0);
dv1PVec.x = v1Vec.x * tanLen / (VTXNUM - 1.0);
dv1PVec.y = v1Vec.y * tanLen / (VTXNUM - 1.0);
setBoundary(d_pnlBtmMat, dv0PVec, dv1PVec, btmElmIdx, btmElmNum);
}
return;
}
__global__ void penaltyGatherKernel(double *d_pnlMat, double *d_pnlBtmMat, int *d_vtxBtmMat,
int btmElmNum, int lmkNum, int btmLmkNum)
{
int btmLmkIdx = blockIdx.x * blockDim.x + threadIdx.x;
if ( btmLmkIdx < btmLmkNum )
{
vector pnlVec = {0.0, 0.0};
int adjNum = d_vtxBtmMat[btmLmkIdx];
for ( int adjIdx = 0; adjIdx < adjNum; ++adjIdx )
{
int btmElmIdx = d_vtxBtmMat[(1 + 2 * adjIdx ) * btmLmkNum + btmLmkIdx];
int lclIdx = d_vtxBtmMat[(1 + 2 * adjIdx + 1) * btmLmkNum + btmLmkIdx];
vector pnlBtmVec;
getVector(pnlBtmVec, d_pnlBtmMat + lclIdx * btmElmNum * DIMNUM, btmElmIdx, btmElmNum);
vectorSum(pnlVec, pnlVec, pnlBtmVec);
}
setVector(d_pnlMat, pnlVec, btmLmkIdx, lmkNum);
}
return;
}
void applyPenaltyOperator(double *d_pnlMat, double *d_lmkBtmMat, double *d_vlcMat,
double *d_pnlBtmMat, int *d_btmVtxMat, int *d_vtxBtmMat,
int lmkNum, int btmElmNum, int btmLmkNum)
{
int blkNum = (btmElmNum - 1) / BLKDIM + 1;
penaltyComputeKernel <<<blkNum, BLKDIM>>> (d_pnlBtmMat, d_lmkBtmMat, d_vlcMat,
d_btmVtxMat, lmkNum, btmElmNum);
blkNum = (btmLmkNum - 1) / BLKDIM + 1;
cudaMemset(d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM);
penaltyGatherKernel <<<blkNum, BLKDIM>>> (d_pnlMat, d_pnlBtmMat,
d_vtxBtmMat, btmElmNum, lmkNum, btmLmkNum);
return;
}
|
10a967cc1c136bc6c8099e3397eb0ee408117ff3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include <thrust/tabulate.h>
#include "join/joining.h"
#include "gdf_table.cuh"
#include "hashmap/hash_functions.cuh"
#include "int_fastdiv.h"
#include "rmm.h"
#include "nvtx_utils.h"
constexpr int BLOCK_SIZE = 256;
constexpr int ROWS_PER_THREAD = 1;
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This function determines if a number is a power of 2.
*
* @Param number The number to check.
*
* @Returns True if the number is a power of 2.
*/
/* ----------------------------------------------------------------------------*/
template <typename T>
bool is_power_two( T number )
{
return (0 == (number & (number - 1)));
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This functor is used to compute the hash value for the rows
* of a gdf_table
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename size_type>
struct row_hasher
{
row_hasher(gdf_table<size_type> const & table_to_hash)
: the_table{table_to_hash}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index);
}
gdf_table<size_type> const & the_table;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the hash value of each row in the input set of columns.
*
* @Param num_cols The number of columns in the input set
* @Param input The list of columns whose rows will be hashed
* @Param hash The hash function to use
* @Param output The hash value of each row of the input
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash(int num_cols, gdf_column **input, gdf_hash_func hash, gdf_column *output)
{
// Ensure inputs aren't null
if((0 == num_cols)
|| (nullptr == input)
|| (nullptr == output))
{
return GDF_DATASET_EMPTY;
}
// check that the output dtype is int32
// TODO: do we need to support int64 as well?
if (output->dtype != GDF_INT32)
{
return GDF_UNSUPPORTED_DTYPE;
}
// Return immediately for empty input/output
if(nullptr != input[0]) {
if(0 == input[0]->size){
return GDF_SUCCESS;
}
}
if(0 == output->size) {
return GDF_SUCCESS;
}
else if(nullptr == output->data) {
return GDF_DATASET_EMPTY;
}
using size_type = int64_t;
// Wrap input columns in gdf_table
std::unique_ptr< gdf_table<size_type> > input_table{new gdf_table<size_type>(num_cols, input)};
const size_type num_rows = input_table->get_column_length();
// Wrap output buffer in Thrust device_ptr
hash_value_type * p_output = static_cast<hash_value_type*>(output->data);
thrust::device_ptr<hash_value_type> row_hash_values = thrust::device_pointer_cast(p_output);
hipStream_t stream{0};
rmm_temp_allocator allocator(stream);
auto exec = thrust::hip::par(allocator).on(stream);
// Compute the hash value for each row depending on the specified hash function
switch(hash)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(exec,
row_hash_values,
row_hash_values + num_rows,
row_hasher<MurmurHash3_32,size_type>(*input_table));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(exec,
row_hash_values,
row_hash_values + num_rows,
row_hasher<IdentityHash,size_type>(*input_table));
break;
}
default:
return GDF_INVALID_HASH_FUNCTION;
}
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the FAST modulo operation implemented in int_fastdiv from here:
* https://github.com/milakov/int_fastdiv
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct fast_modulo_partitioner
{
fast_modulo_partitioner(int num_partitions) : fast_divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
// Using int_fastdiv casts 'hash_value' to an int, which can
// result in negative modulos, requiring taking the absolute value
// Because of the casting it can also return results that are not
// the same as using the normal % operator
output_type partition_number = std::abs(hash_value % fast_divisor);
return partition_number;
}
const int_fastdiv fast_divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct modulo_partitioner
{
modulo_partitioner(size_type num_partitions) : divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value % divisor;
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses bitshifts. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct bitwise_partitioner
{
bitwise_partitioner(size_type num_partitions) : divisor{(num_partitions - 1)}
{
assert( is_power_two(num_partitions) );
}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value & (divisor);
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a gdf_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @Param[in] the_table The table whose rows will be partitioned
* @Param[in] num_rows The number of rows in the table
* @Param[in] num_partitions The number of partitions to divide the rows into
* @Param[in] the_partitioner The functor that maps a rows hash value to a partition number
* @Param[out] row_partition_numbers Array that holds which partition each row belongs to
* @Param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @Param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename partitioner_type,
typename size_type>
__global__
void compute_row_partition_numbers(gdf_table<size_type> const & the_table,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type * row_partition_numbers,
size_type * block_partition_sizes,
size_type * global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while( row_number < num_rows)
{
// See here why template disambiguator is required:
// https://stackoverflow.com/questions/4077110/template-disambiguator
const hash_value_type row_hash_value = the_table.template hash_row<hash_function>(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @Param row_partition_numbers The array that records the partition number for each row
* @Param num_rows The number of rows
* @Param num_partitions The number of partitions
* @Param[out] block_partition_offsets Array that holds the offset of each partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void compute_row_output_locations(size_type * row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type * block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number= threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_offsets[partition_number] = block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while( row_number < num_rows )
{
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location = atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Partitions an input gdf_table into a specified number of partitions.
* A hash value is computed for each row in a sub-set of the columns of the
* input table. Each hash value is placed in a bin from [0, number of partitions).
* A copy of the input table is created where the rows are rearranged such that
* rows with hash values in the same bin are contiguous.
*
* @Param[in] input_table The table to partition
* @Param[in] table_to_hash Sub-table of the input table with only the columns
* that will be hashed
* @Param[in] num_partitions The number of partitions that table will be rearranged into
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @tparam hash_function The hash function that will be used to hash the rows
*/
/* ----------------------------------------------------------------------------*/
template < template <typename> class hash_function,
typename size_type>
gdf_error hash_partition_gdf_table(gdf_table<size_type> const & input_table,
gdf_table<size_type> const & table_to_hash,
const size_type num_partitions,
size_type * partition_offsets,
gdf_table<size_type> & partitioned_output)
{
const size_type num_rows = table_to_hash.get_column_length();
constexpr int rows_per_block = BLOCK_SIZE * ROWS_PER_THREAD;
const size_type grid_size = (num_rows + rows_per_block - 1) / rows_per_block;
// Allocate array to hold which partition each row belongs to
size_type * row_partition_numbers{nullptr};
RMM_TRY( rmmAlloc((void**)&row_partition_numbers, num_rows * sizeof(hash_value_type), 0) ); // TODO: non-default stream?
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
size_type * block_partition_sizes{nullptr};
RMM_TRY(rmmAlloc((void**)&block_partition_sizes, (grid_size * num_partitions) * sizeof(size_type), 0) );
// Holds the total number of rows in each partition
size_type * global_partition_sizes{nullptr};
RMM_TRY( rmmAlloc((void**)&global_partition_sizes, num_partitions * sizeof(size_type), 0) );
CUDA_TRY( hipMemsetAsync(global_partition_sizes, 0, num_partitions * sizeof(size_type)) );
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if( true == is_power_two(num_partitions) )
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers<hash_function>)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
else
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
hipLaunchKernelGGL(( compute_row_partition_numbers<hash_function>)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
CUDA_CHECK_LAST();
hipStream_t stream{0}; // TODO: non-default stream?
rmm_temp_allocator allocator(stream);
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each blocks portion of each partition in the output
size_type * scanned_block_partition_sizes{block_partition_sizes};
thrust::exclusive_scan(thrust::hip::par(allocator).on(stream),
block_partition_sizes,
block_partition_sizes + (grid_size * num_partitions),
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output. This can be done independently on a separate stream
hipStream_t s1{};
hipStreamCreate(&s1);
size_type * scanned_global_partition_sizes{global_partition_sizes};
thrust::exclusive_scan(thrust::hip::par(allocator).on(s1),
global_partition_sizes,
global_partition_sizes + num_partitions,
scanned_global_partition_sizes);
CUDA_CHECK_LAST();
// Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
CUDA_TRY(hipMemcpyAsync(partition_offsets,
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
hipMemcpyDeviceToHost,
s1));
// Compute the output location for each row in-place based on its
// partition number such that each partition will be contiguous in memory
size_type * row_output_locations{row_partition_numbers};
hipLaunchKernelGGL(( compute_row_output_locations)
, dim3(grid_size), dim3(BLOCK_SIZE), num_partitions * sizeof(size_type), 0, row_output_locations,
num_rows,
num_partitions,
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Creates the partitioned output table by scattering the rows of
// the input table to rows of the output table based on each rows
// output location
gdf_error gdf_error_code = input_table.scatter(partitioned_output,
row_output_locations);
if(GDF_SUCCESS != gdf_error_code){
return gdf_error_code;
}
CUDA_CHECK_LAST();
RMM_TRY(rmmFree(row_partition_numbers, 0));
RMM_TRY(rmmFree(block_partition_sizes, 0));
hipStreamSynchronize(s1);
hipStreamDestroy(s1);
RMM_TRY(rmmFree(global_partition_sizes, 0));
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the hash values of the specified rows in the input columns and
* bins the hash values into the desired number of partitions. Rearranges the input
* columns such that rows with hash values in the same bin are contiguous.
*
* @Param[in] num_input_cols The number of columns in the input columns
* @Param[in] input[] The input set of columns
* @Param[in] columns_to_hash[] Indices of the columns in the input set to hash
* @Param[in] num_cols_to_hash The number of columns to hash
* @Param[in] num_partitions The number of partitions to rearrange the input rows into
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[in] hash The hash function to use
*
* @Returns If the operation was successful, returns GDF_SUCCESS
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash_partition(int num_input_cols,
gdf_column * input[],
int columns_to_hash[],
int num_cols_to_hash,
int num_partitions,
gdf_column * partitioned_output[],
int partition_offsets[],
gdf_hash_func hash)
{
// Use int until gdf API is updated to use something other than int
// for ordinal variables
using size_type = int;
// Ensure all the inputs are non-zero and not null
if((0 == num_input_cols)
|| (0 == num_cols_to_hash)
|| (0 == num_partitions)
|| (nullptr == input)
|| (nullptr == partitioned_output)
|| (nullptr == columns_to_hash)
|| (nullptr == partition_offsets))
{
return GDF_INVALID_API_CALL;
}
const size_t num_rows{input[0]->size};
// If the input is empty, return immediately
if(0 == num_rows)
{
return GDF_SUCCESS;
}
// TODO Check if the num_rows is > MAX_ROWS (MAX_INT)
// check that the columns data are not null, have matching types,
// and the same number of rows
for (size_type i = 0; i < num_input_cols; i++) {
if( (nullptr == input[i]->data)
|| (nullptr == partitioned_output[i]->data))
return GDF_DATASET_EMPTY;
if(input[i]->dtype != partitioned_output[i]->dtype)
return GDF_PARTITION_DTYPE_MISMATCH;
if((num_rows != input[i]->size)
|| (num_rows != partitioned_output[i]->size))
return GDF_COLUMN_SIZE_MISMATCH;
}
PUSH_RANGE("LIBGDF_HASH_PARTITION", PARTITION_COLOR);
// Wrap input and output columns in gdf_table
std::unique_ptr< const gdf_table<size_type> > input_table{new gdf_table<size_type>(num_input_cols, input)};
std::unique_ptr< gdf_table<size_type> > output_table{new gdf_table<size_type>(num_input_cols, partitioned_output)};
// Create vector of pointers to columns that will be hashed
std::vector<gdf_column *> gdf_columns_to_hash(num_cols_to_hash);
for(size_type i = 0; i < num_cols_to_hash; ++i)
{
gdf_columns_to_hash[i] = input[columns_to_hash[i]];
}
// Create a separate table of the columns to be hashed
std::unique_ptr< const gdf_table<size_type> > table_to_hash {new gdf_table<size_type>(num_cols_to_hash,
gdf_columns_to_hash.data())};
gdf_error gdf_status{GDF_SUCCESS};
switch(hash)
{
case GDF_HASH_MURMUR3:
{
gdf_status = hash_partition_gdf_table<MurmurHash3_32>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
case GDF_HASH_IDENTITY:
{
gdf_status = hash_partition_gdf_table<IdentityHash>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
default:
gdf_status = GDF_INVALID_HASH_FUNCTION;
}
POP_RANGE();
return gdf_status;
}
| 10a967cc1c136bc6c8099e3397eb0ee408117ff3.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gdf/gdf.h>
#include <gdf/errorutils.h>
#include <thrust/tabulate.h>
#include "join/joining.h"
#include "gdf_table.cuh"
#include "hashmap/hash_functions.cuh"
#include "int_fastdiv.h"
#include "rmm.h"
#include "nvtx_utils.h"
constexpr int BLOCK_SIZE = 256;
constexpr int ROWS_PER_THREAD = 1;
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This function determines if a number is a power of 2.
*
* @Param number The number to check.
*
* @Returns True if the number is a power of 2.
*/
/* ----------------------------------------------------------------------------*/
template <typename T>
bool is_power_two( T number )
{
return (0 == (number & (number - 1)));
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis This functor is used to compute the hash value for the rows
* of a gdf_table
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename size_type>
struct row_hasher
{
row_hasher(gdf_table<size_type> const & table_to_hash)
: the_table{table_to_hash}
{}
__device__
hash_value_type operator()(size_type row_index) const
{
return the_table.template hash_row<hash_function>(row_index);
}
gdf_table<size_type> const & the_table;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Computes the hash value of each row in the input set of columns.
*
* @Param num_cols The number of columns in the input set
* @Param input The list of columns whose rows will be hashed
* @Param hash The hash function to use
* @Param output The hash value of each row of the input
*
* @Returns
*/
/* ----------------------------------------------------------------------------*/
gdf_error gdf_hash(int num_cols, gdf_column **input, gdf_hash_func hash, gdf_column *output)
{
// Ensure inputs aren't null
if((0 == num_cols)
|| (nullptr == input)
|| (nullptr == output))
{
return GDF_DATASET_EMPTY;
}
// check that the output dtype is int32
// TODO: do we need to support int64 as well?
if (output->dtype != GDF_INT32)
{
return GDF_UNSUPPORTED_DTYPE;
}
// Return immediately for empty input/output
if(nullptr != input[0]) {
if(0 == input[0]->size){
return GDF_SUCCESS;
}
}
if(0 == output->size) {
return GDF_SUCCESS;
}
else if(nullptr == output->data) {
return GDF_DATASET_EMPTY;
}
using size_type = int64_t;
// Wrap input columns in gdf_table
std::unique_ptr< gdf_table<size_type> > input_table{new gdf_table<size_type>(num_cols, input)};
const size_type num_rows = input_table->get_column_length();
// Wrap output buffer in Thrust device_ptr
hash_value_type * p_output = static_cast<hash_value_type*>(output->data);
thrust::device_ptr<hash_value_type> row_hash_values = thrust::device_pointer_cast(p_output);
cudaStream_t stream{0};
rmm_temp_allocator allocator(stream);
auto exec = thrust::cuda::par(allocator).on(stream);
// Compute the hash value for each row depending on the specified hash function
switch(hash)
{
case GDF_HASH_MURMUR3:
{
thrust::tabulate(exec,
row_hash_values,
row_hash_values + num_rows,
row_hasher<MurmurHash3_32,size_type>(*input_table));
break;
}
case GDF_HASH_IDENTITY:
{
thrust::tabulate(exec,
row_hash_values,
row_hash_values + num_rows,
row_hasher<IdentityHash,size_type>(*input_table));
break;
}
default:
return GDF_INVALID_HASH_FUNCTION;
}
CUDA_CHECK_LAST();
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the FAST modulo operation implemented in int_fastdiv from here:
* https://github.com/milakov/int_fastdiv
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct fast_modulo_partitioner
{
fast_modulo_partitioner(int num_partitions) : fast_divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
// Using int_fastdiv casts 'hash_value' to an int, which can
// result in negative modulos, requiring taking the absolute value
// Because of the casting it can also return results that are not
// the same as using the normal % operator
output_type partition_number = std::abs(hash_value % fast_divisor);
return partition_number;
}
const int_fastdiv fast_divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses the modulo operation.
*/
/* ----------------------------------------------------------------------------*/
template <typename hash_value_t,
typename size_type,
typename output_type>
struct modulo_partitioner
{
modulo_partitioner(size_type num_partitions) : divisor{num_partitions}{}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value % divisor;
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Functor to map a hash value to a particular 'bin' or partition number
* that uses bitshifts. Only works when num_partitions is a power of 2.
*
* For n % d, if d is a power of two, then it can be computed more efficiently via
* a single bitwise AND as:
* n & (d - 1)
*/
/* ----------------------------------------------------------------------------*/
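// Example: with num_partitions = 8 the stored divisor is 7 (0b111), so a hash
// value of 29 (0b11101) maps to 29 & 7 = 5, identical to 29 % 8.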
template <typename hash_value_t,
typename size_type,
typename output_type>
struct bitwise_partitioner
{
bitwise_partitioner(size_type num_partitions) : divisor{(num_partitions - 1)}
{
assert( is_power_two(num_partitions) );
}
__host__ __device__
output_type operator()(hash_value_t hash_value) const
{
return hash_value & (divisor);
}
const size_type divisor;
};
/* --------------------------------------------------------------------------*/
/**
* @brief Computes which partition each row of a gdf_table will belong to based
on hashing each row, and applying a partition function to the hash value.
Records the size of each partition for each thread block as well as the global
size of each partition across all thread blocks.
*
* @Param[in] the_table The table whose rows will be partitioned
* @Param[in] num_rows The number of rows in the table
* @Param[in] num_partitions The number of partitions to divide the rows into
 * @Param[in] the_partitioner  The functor that maps a row's hash value to a partition number
* @Param[out] row_partition_numbers Array that holds which partition each row belongs to
* @Param[out] block_partition_sizes Array that holds the size of each partition for each block,
* i.e., { {block0 partition0 size, block1 partition0 size, ...},
{block0 partition1 size, block1 partition1 size, ...},
...
{block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
* @Param[out] global_partition_sizes The number of rows in each partition.
*/
/* ----------------------------------------------------------------------------*/
template <template <typename> class hash_function,
typename partitioner_type,
typename size_type>
__global__
void compute_row_partition_numbers(gdf_table<size_type> const & the_table,
const size_type num_rows,
const size_type num_partitions,
const partitioner_type the_partitioner,
size_type * row_partition_numbers,
size_type * block_partition_sizes,
size_type * global_partition_sizes)
{
// Accumulate histogram of the size of each partition in shared memory
extern __shared__ size_type shared_partition_sizes[];
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
// Initialize local histogram
size_type partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_sizes[partition_number] = 0;
partition_number += blockDim.x;
}
__syncthreads();
// Compute the hash value for each row, store it to the array of hash values
// and compute the partition to which the hash value belongs and increment
// the shared memory counter for that partition
while( row_number < num_rows)
{
// See here why template disambiguator is required:
// https://stackoverflow.com/questions/4077110/template-disambiguator
const hash_value_type row_hash_value = the_table.template hash_row<hash_function>(row_number);
const size_type partition_number = the_partitioner(row_hash_value);
row_partition_numbers[row_number] = partition_number;
atomicAdd(&(shared_partition_sizes[partition_number]), size_type(1));
row_number += blockDim.x * gridDim.x;
}
__syncthreads();
// Flush shared memory histogram to global memory
partition_number = threadIdx.x;
while(partition_number < num_partitions)
{
const size_type block_partition_size = shared_partition_sizes[partition_number];
// Update global size of each partition
atomicAdd(&global_partition_sizes[partition_number], block_partition_size);
// Record the size of this partition in this block
const size_type write_location = partition_number * gridDim.x + blockIdx.x;
block_partition_sizes[write_location] = block_partition_size;
partition_number += blockDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @Synopsis Given an array of partition numbers, computes the final output location
for each element in the output such that all rows with the same partition are
contiguous in memory.
*
* @Param row_partition_numbers The array that records the partition number for each row
* @Param num_rows The number of rows
 * @Param num_partitions The number of partitions
* @Param[out] block_partition_offsets Array that holds the offset of each partition for each thread block,
* i.e., { {block0 partition0 offset, block1 partition0 offset, ...},
{block0 partition1 offset, block1 partition1 offset, ...},
...
{block0 partition(num_partitions-1) offset, block1 partition(num_partitions -1) offset, ...} }
*/
/* ----------------------------------------------------------------------------*/
template <typename size_type>
__global__
void compute_row_output_locations(size_type * row_partition_numbers,
const size_type num_rows,
const size_type num_partitions,
size_type * block_partition_offsets)
{
// Shared array that holds the offset of this blocks partitions in
// global memory
extern __shared__ size_type shared_partition_offsets[];
// Initialize array of this blocks offsets from global array
size_type partition_number= threadIdx.x;
while(partition_number < num_partitions)
{
shared_partition_offsets[partition_number] = block_partition_offsets[partition_number * gridDim.x + blockIdx.x];
partition_number += blockDim.x;
}
__syncthreads();
size_type row_number = threadIdx.x + blockIdx.x * blockDim.x;
  // Get each row's partition number, and get its output location by
// incrementing block's offset counter for that partition number
// and store the row's output location in-place
while( row_number < num_rows )
{
// Get partition number of this row
const size_type partition_number = row_partition_numbers[row_number];
// Get output location based on partition number by incrementing the corresponding
// partition offset for this block
const size_type row_output_location = atomicAdd(&(shared_partition_offsets[partition_number]), size_type(1));
// Store the row's output location in-place
row_partition_numbers[row_number] = row_output_location;
row_number += blockDim.x * gridDim.x;
}
}
/* --------------------------------------------------------------------------*/
/**
* @brief Partitions an input gdf_table into a specified number of partitions.
* A hash value is computed for each row in a sub-set of the columns of the
* input table. Each hash value is placed in a bin from [0, number of partitions).
* A copy of the input table is created where the rows are rearranged such that
* rows with hash values in the same bin are contiguous.
*
* @Param[in] input_table The table to partition
* @Param[in] table_to_hash Sub-table of the input table with only the columns
* that will be hashed
* @Param[in] num_partitions The number of partitions that table will be rearranged into
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @tparam hash_function The hash function that will be used to hash the rows
*/
/* ----------------------------------------------------------------------------*/
template < template <typename> class hash_function,
typename size_type>
gdf_error hash_partition_gdf_table(gdf_table<size_type> const & input_table,
gdf_table<size_type> const & table_to_hash,
const size_type num_partitions,
size_type * partition_offsets,
gdf_table<size_type> & partitioned_output)
{
const size_type num_rows = table_to_hash.get_column_length();
constexpr int rows_per_block = BLOCK_SIZE * ROWS_PER_THREAD;
const size_type grid_size = (num_rows + rows_per_block - 1) / rows_per_block;
// Allocate array to hold which partition each row belongs to
size_type * row_partition_numbers{nullptr};
RMM_TRY( rmmAlloc((void**)&row_partition_numbers, num_rows * sizeof(hash_value_type), 0) ); // TODO: non-default stream?
// Array to hold the size of each partition computed by each block
// i.e., { {block0 partition0 size, block1 partition0 size, ...},
// {block0 partition1 size, block1 partition1 size, ...},
// ...
// {block0 partition(num_partitions-1) size, block1 partition(num_partitions -1) size, ...} }
size_type * block_partition_sizes{nullptr};
RMM_TRY(rmmAlloc((void**)&block_partition_sizes, (grid_size * num_partitions) * sizeof(size_type), 0) );
// Holds the total number of rows in each partition
size_type * global_partition_sizes{nullptr};
RMM_TRY( rmmAlloc((void**)&global_partition_sizes, num_partitions * sizeof(size_type), 0) );
CUDA_TRY( cudaMemsetAsync(global_partition_sizes, 0, num_partitions * sizeof(size_type)) );
// If the number of partitions is a power of two, we can compute the partition
// number of each row more efficiently with bitwise operations
if( true == is_power_two(num_partitions) )
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = bitwise_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<hash_function>
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
else
{
// Determines how the mapping between hash value and partition number is computed
using partitioner_type = modulo_partitioner<hash_value_type, size_type, size_type>;
// Computes which partition each row belongs to by hashing the row and performing
// a partitioning operator on the hash value. Also computes the number of
// rows in each partition both for each thread block as well as across all blocks
compute_row_partition_numbers<hash_function>
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(table_to_hash,
num_rows,
num_partitions,
partitioner_type(num_partitions),
row_partition_numbers,
block_partition_sizes,
global_partition_sizes);
}
CUDA_CHECK_LAST();
cudaStream_t stream{0}; // TODO: non-default stream?
rmm_temp_allocator allocator(stream);
// Compute exclusive scan of all blocks' partition sizes in-place to determine
// the starting point for each blocks portion of each partition in the output
size_type * scanned_block_partition_sizes{block_partition_sizes};
thrust::exclusive_scan(thrust::cuda::par(allocator).on(stream),
block_partition_sizes,
block_partition_sizes + (grid_size * num_partitions),
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Compute exclusive scan of size of each partition to determine offset location
// of each partition in final output. This can be done independently on a separate stream
cudaStream_t s1{};
cudaStreamCreate(&s1);
size_type * scanned_global_partition_sizes{global_partition_sizes};
thrust::exclusive_scan(thrust::cuda::par(allocator).on(s1),
global_partition_sizes,
global_partition_sizes + num_partitions,
scanned_global_partition_sizes);
CUDA_CHECK_LAST();
  // Copy the result of the exclusive scan to the output offsets array
// to indicate the starting point for each partition in the output
CUDA_TRY(cudaMemcpyAsync(partition_offsets,
scanned_global_partition_sizes,
num_partitions * sizeof(size_type),
cudaMemcpyDeviceToHost,
s1));
  // Compute the output location for each row in-place based on its
// partition number such that each partition will be contiguous in memory
size_type * row_output_locations{row_partition_numbers};
compute_row_output_locations
<<<grid_size, BLOCK_SIZE, num_partitions * sizeof(size_type)>>>(row_output_locations,
num_rows,
num_partitions,
scanned_block_partition_sizes);
CUDA_CHECK_LAST();
// Creates the partitioned output table by scattering the rows of
  // the input table to rows of the output table based on each row's
// output location
gdf_error gdf_error_code = input_table.scatter(partitioned_output,
row_output_locations);
if(GDF_SUCCESS != gdf_error_code){
return gdf_error_code;
}
CUDA_CHECK_LAST();
RMM_TRY(rmmFree(row_partition_numbers, 0));
RMM_TRY(rmmFree(block_partition_sizes, 0));
cudaStreamSynchronize(s1);
cudaStreamDestroy(s1);
RMM_TRY(rmmFree(global_partition_sizes, 0));
return GDF_SUCCESS;
}
/* --------------------------------------------------------------------------*/
/**
* @brief Computes the hash values of the specified rows in the input columns and
* bins the hash values into the desired number of partitions. Rearranges the input
* columns such that rows with hash values in the same bin are contiguous.
*
* @Param[in] num_input_cols The number of columns in the input columns
* @Param[in] input[] The input set of columns
* @Param[in] columns_to_hash[] Indices of the columns in the input set to hash
* @Param[in] num_cols_to_hash The number of columns to hash
* @Param[in] num_partitions The number of partitions to rearrange the input rows into
* @Param[out] partitioned_output Preallocated gdf_columns to hold the rearrangement
* of the input columns into the desired number of partitions
* @Param[out] partition_offsets Preallocated array the size of the number of
* partitions. Where partition_offsets[i] indicates the starting position
* of partition 'i'
* @Param[in] hash The hash function to use
*
* @Returns If the operation was successful, returns GDF_SUCCESS
*/
/* ----------------------------------------------------------------------------*/
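// Illustrative call (assumes `inputs` and `outputs` are caller-built arrays of
// two gdf_column pointers with matching dtypes and sizes), partitioning on
// column 0 into four pieces:
//   int cols_to_hash[] = {0};
//   int offsets[4];
//   gdf_error err = gdf_hash_partition(2, inputs, cols_to_hash, 1, 4,
//                                      outputs, offsets, GDF_HASH_MURMUR3);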
gdf_error gdf_hash_partition(int num_input_cols,
gdf_column * input[],
int columns_to_hash[],
int num_cols_to_hash,
int num_partitions,
gdf_column * partitioned_output[],
int partition_offsets[],
gdf_hash_func hash)
{
// Use int until gdf API is updated to use something other than int
// for ordinal variables
using size_type = int;
// Ensure all the inputs are non-zero and not null
if((0 == num_input_cols)
|| (0 == num_cols_to_hash)
|| (0 == num_partitions)
|| (nullptr == input)
|| (nullptr == partitioned_output)
|| (nullptr == columns_to_hash)
|| (nullptr == partition_offsets))
{
return GDF_INVALID_API_CALL;
}
const size_t num_rows{input[0]->size};
// If the input is empty, return immediately
if(0 == num_rows)
{
return GDF_SUCCESS;
}
// TODO Check if the num_rows is > MAX_ROWS (MAX_INT)
// check that the columns data are not null, have matching types,
// and the same number of rows
for (size_type i = 0; i < num_input_cols; i++) {
if( (nullptr == input[i]->data)
|| (nullptr == partitioned_output[i]->data))
return GDF_DATASET_EMPTY;
if(input[i]->dtype != partitioned_output[i]->dtype)
return GDF_PARTITION_DTYPE_MISMATCH;
if((num_rows != input[i]->size)
|| (num_rows != partitioned_output[i]->size))
return GDF_COLUMN_SIZE_MISMATCH;
}
PUSH_RANGE("LIBGDF_HASH_PARTITION", PARTITION_COLOR);
// Wrap input and output columns in gdf_table
std::unique_ptr< const gdf_table<size_type> > input_table{new gdf_table<size_type>(num_input_cols, input)};
std::unique_ptr< gdf_table<size_type> > output_table{new gdf_table<size_type>(num_input_cols, partitioned_output)};
// Create vector of pointers to columns that will be hashed
std::vector<gdf_column *> gdf_columns_to_hash(num_cols_to_hash);
for(size_type i = 0; i < num_cols_to_hash; ++i)
{
gdf_columns_to_hash[i] = input[columns_to_hash[i]];
}
// Create a separate table of the columns to be hashed
std::unique_ptr< const gdf_table<size_type> > table_to_hash {new gdf_table<size_type>(num_cols_to_hash,
gdf_columns_to_hash.data())};
gdf_error gdf_status{GDF_SUCCESS};
switch(hash)
{
case GDF_HASH_MURMUR3:
{
gdf_status = hash_partition_gdf_table<MurmurHash3_32>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
case GDF_HASH_IDENTITY:
{
gdf_status = hash_partition_gdf_table<IdentityHash>(*input_table,
*table_to_hash,
num_partitions,
partition_offsets,
*output_table);
break;
}
default:
gdf_status = GDF_INVALID_HASH_FUNCTION;
}
POP_RANGE();
return gdf_status;
}
|
ebedffd44135f0d8523ad8b36e7536e371233acd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// =============================================================================
// === GPUQREngine/Source/GPUQREngine_UberKernel.cu ============================
// =============================================================================
// GPUQREngine, Copyright (c) 2013, Timothy A Davis, Sencer Nuri Yeralan,
// and Sanjay Ranka. All Rights Reserved.
// SPDX-License-Identifier: GPL-2.0+
//------------------------------------------------------------------------------
//
// This is the actual concrete kernel invocation, transferring control flow to
// the GPU accelerator briefly. We actually launch kernels using alternating
// streams to overlap communication with computation, so the launch is actually
// asynchronous in nature.  We use the CUDA events and streams model throughout
// the Scheduler to coordinate asynchronous launch behavior.
//
// =============================================================================
#ifdef SUITESPARSE_CUDA
#define CUDA_INCLUDE
#include "Kernel/uberKernel.cu"
void GPUQREngine_UberKernel
(
hipStream_t kernelStream, // The stream on which to launch the kernel
TaskDescriptor *gpuWorkQueue, // The list of work items for the GPU
int numTasks // The # of items in the work list
)
{
/* Set the standard launch configuration. */
dim3 threads(NUMTHREADS, 1);
dim3 grid(numTasks, 1);
/* Launch the kernel */
hipLaunchKernelGGL(( qrKernel), dim3(grid), dim3(threads), 0, kernelStream, gpuWorkQueue, numTasks);
}
#endif
| ebedffd44135f0d8523ad8b36e7536e371233acd.cu | // =============================================================================
// === GPUQREngine/Source/GPUQREngine_UberKernel.cu ============================
// =============================================================================
// GPUQREngine, Copyright (c) 2013, Timothy A Davis, Sencer Nuri Yeralan,
// and Sanjay Ranka. All Rights Reserved.
// SPDX-License-Identifier: GPL-2.0+
//------------------------------------------------------------------------------
//
// This is the actual concrete kernel invocation, transferring control flow to
// the GPU accelerator briefly. We actually launch kernels using alternating
// streams to overlap communication with computation, so the launch is actually
// asynchronous in nature.  We use the CUDA events and streams model throughout
// the Scheduler to coordinate asynchronous launch behavior.
//
// =============================================================================
#ifdef SUITESPARSE_CUDA
#define CUDA_INCLUDE
#include "Kernel/uberKernel.cu"
void GPUQREngine_UberKernel
(
cudaStream_t kernelStream, // The stream on which to launch the kernel
TaskDescriptor *gpuWorkQueue, // The list of work items for the GPU
int numTasks // The # of items in the work list
)
{
/* Set the standard launch configuration. */
dim3 threads(NUMTHREADS, 1);
dim3 grid(numTasks, 1);
/* Launch the kernel */
qrKernel<<<grid, threads, 0, kernelStream>>>(gpuWorkQueue, numTasks);
}
#endif
|
4aecbd6c95a905fdd83cbf551d37d66506f344bf.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
  //@@ Insert code to implement vector addition here
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < len) out[i] = in1[i] + in2[i];
} | 4aecbd6c95a905fdd83cbf551d37d66506f344bf.cu | #include "includes.h"
__global__ void vecAdd(float *in1, float *in2, float *out, int len) {
  //@@ Insert code to implement vector addition here
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < len) out[i] = in1[i] + in2[i];
} |
4a0fd6cbc6ca64c23201ca8b439df129f594fbda.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include"E:\Program Files\MATLAB\R2012b\extern\include\mex.h"
#include <stdio.h>
#include <algorithm>
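// Saliency smoothing: block i handles one of the N superpixel sets (midP packs 11
// features for up to K superpixels per set) and thread j refines FinS[K*i+j] as a
// weighted average of MSaliencyM over nearby superpixels in set i and sets i-1/i+1.
// A neighbour contributes when its L1 location distance is below
// min(max(ring, Par1), Par2); its weight is exp(-Par * colour distance).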
__global__ void smooth_kernel(double *FinS,double* midP,double * ring,double *MSaliencyM ,double *K1,double *N1,double *Par,double *spnum ,double *Par1,double *Par2)
{
int i=blockIdx.x;
int j=threadIdx.x;
int K=(int)(*K1),N=(int)(*N1);
if (blockIdx.x >= N || threadIdx.x >= spnum[i])
return;
double Lcolor1,Lcolor2,Lcolor3,Llocation1,Llocation2,Lcolor4,Lcolor5;
double Rcolor1,Rcolor2,Rcolor3,Rlocation1,Rlocation2,Rcolor4,Rcolor5;
double Tweight=0,weight1=0;
double MSS=0;
double CDist=0,LDist=0;
Llocation1 = midP[(K)*11*i+j],Llocation2 = midP[(K)*11*i+(K)*1+j],Lcolor1 = midP[(K)*11*i+(K)*2+j], Lcolor2 = midP[(K)*11*i+(K)*3+j],Lcolor3 = midP[(K)*11*i+(K)*4+j],Lcolor4 = midP[(K)*11*i+(K)*9+j],Lcolor5 = midP[(K)*11*i+(K)*10+j];
for (int k=0;k<spnum[i];k++)
{
Rlocation1 = midP[(K)*11*i+k],Rlocation2 = midP[(K)*11*i+(K)*1+k],Rcolor1 = midP[(K)*11*i+(K)*2+k], Rcolor2 = midP[(K)*11*i+(K)*3+k],Rcolor3 = midP[(K)*11*i+(K)*4+k],Rcolor4 = midP[(K)*11*i+(K)*9+k],Rcolor5 = midP[(K)*11*i+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
// CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
if (LDist<min(max(ring[i*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*i+k]*weight1;
}
}
if(i<N-1)
for (int k=0;k<spnum[i+1];k++)
{
Rlocation1 = midP[(K)*11*(i+1)+k],Rlocation2 = midP[(K)*11*(i+1)+(K)*1+k],Rcolor1 = midP[(K)*11*(i+1)+(K)*2+k], Rcolor2 = midP[(K)*11*(i+1)+(K)*3+k],Rcolor3 = midP[(K)*11*(i+1)+(K)*4+k],Rcolor4 = midP[(K)*11*(i+1)+(K)*9+k],Rcolor5 = midP[(K)*11*(i+1)+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
//CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
if (LDist<min(max(ring[(i+1)*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*(i+1)+k]*weight1;
}
}
if (i>0)
for (int k=0;k<spnum[i-1];k++)
{
Rlocation1 = midP[(K)*11*(i-1)+k],Rlocation2 = midP[(K)*11*(i-1)+(K)*1+k],Rcolor1 = midP[(K)*11*(i-1)+(K)*2+k], Rcolor2 = midP[(K)*11*(i-1)+(K)*3+k],Rcolor3 = midP[(K)*11*(i-1)+(K)*4+k],Rcolor4 = midP[(K)*11*(i-1)+(K)*9+k],Rcolor5 = midP[(K)*11*(i-1)+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
//CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
if (LDist<min(max(ring[(i-1)*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*(i-1)+k]*weight1;
}
}
MSS=MSS/Tweight;
FinS[K*i+j]=MSS;
return;
}
void Final_smooth(double *FinS,double* midP,double * ring,double *MSaliencyM,double *K1,double *N1,double *Par,double *spnum,double *Par1,double *Par2)
{
double * dev_FinS;
double *dev_mid,*dev_ring,*dev_MSaliencyM;
double *dev_K1,*dev_N1, *dev_Par,*dev_spnum,*dev_Par1,*dev_Par2;
int K=(int)(*K1),N=(int)(*N1);
hipMalloc((void **)&dev_mid, sizeof(double)* (K) * 11 * N);
hipMalloc((void **)&dev_ring, sizeof(double)* K * N);
hipMalloc((void **)&dev_MSaliencyM, sizeof(double)* K * N);
hipMalloc((void **)&dev_FinS, sizeof(double)* K * N);
hipMalloc((void **)&dev_K1, sizeof(double));
hipMalloc((void **)&dev_N1, sizeof(double));
hipMalloc((void **)&dev_Par, sizeof(double));
hipMalloc((void **)&dev_Par1, sizeof(double));
hipMalloc((void **)&dev_Par2, sizeof(double));
hipMalloc((void **)&dev_spnum, sizeof(double)*N);
hipMemcpy(dev_K1, K1, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_N1, N1, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_mid, midP, sizeof(double)* (K) * 11 * N, hipMemcpyHostToDevice);
hipMemcpy(dev_ring, ring, sizeof(double)* K*N, hipMemcpyHostToDevice);
hipMemcpy(dev_MSaliencyM, MSaliencyM, sizeof(double)* K*N, hipMemcpyHostToDevice);
hipMemcpy(dev_Par, Par, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_Par1, Par1, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_Par2, Par2, sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_spnum, spnum, sizeof(double)*N, hipMemcpyHostToDevice);
dim3 threads(K);
dim3 grids(N);
smooth_kernel << <grids, threads >> >(dev_FinS,dev_mid,dev_ring,dev_MSaliencyM,dev_K1,dev_N1,dev_Par,dev_spnum,dev_Par1,dev_Par2);
hipMemcpy(FinS, dev_FinS, sizeof(double)*K*N, hipMemcpyDeviceToHost);
hipFree(dev_mid);
hipFree(dev_FinS);
hipFree(dev_MSaliencyM);
hipFree(dev_ring);
hipFree(dev_K1);
hipFree(dev_N1);
hipFree(dev_Par);
hipFree(dev_spnum);
hipFree(dev_Par1);
hipFree(dev_Par2);
}
| 4a0fd6cbc6ca64c23201ca8b439df129f594fbda.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include"E:\Program Files\MATLAB\R2012b\extern\include\mex.h"
#include <stdio.h>
#include <algorithm>
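// Saliency smoothing: block i handles one of the N superpixel sets (midP packs 11
// features for up to K superpixels per set) and thread j refines FinS[K*i+j] as a
// weighted average of MSaliencyM over nearby superpixels in set i and sets i-1/i+1.
// A neighbour contributes when its L1 location distance is below
// min(max(ring, Par1), Par2); its weight is exp(-Par * colour distance).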
__global__ void smooth_kernel(double *FinS,double* midP,double * ring,double *MSaliencyM ,double *K1,double *N1,double *Par,double *spnum ,double *Par1,double *Par2)
{
int i=blockIdx.x;
int j=threadIdx.x;
int K=(int)(*K1),N=(int)(*N1);
if (blockIdx.x >= N || threadIdx.x >= spnum[i])
return;
double Lcolor1,Lcolor2,Lcolor3,Llocation1,Llocation2,Lcolor4,Lcolor5;
double Rcolor1,Rcolor2,Rcolor3,Rlocation1,Rlocation2,Rcolor4,Rcolor5;
double Tweight=0,weight1=0;
double MSS=0;
double CDist=0,LDist=0;
Llocation1 = midP[(K)*11*i+j],Llocation2 = midP[(K)*11*i+(K)*1+j],Lcolor1 = midP[(K)*11*i+(K)*2+j], Lcolor2 = midP[(K)*11*i+(K)*3+j],Lcolor3 = midP[(K)*11*i+(K)*4+j],Lcolor4 = midP[(K)*11*i+(K)*9+j],Lcolor5 = midP[(K)*11*i+(K)*10+j];
for (int k=0;k<spnum[i];k++)
{
Rlocation1 = midP[(K)*11*i+k],Rlocation2 = midP[(K)*11*i+(K)*1+k],Rcolor1 = midP[(K)*11*i+(K)*2+k], Rcolor2 = midP[(K)*11*i+(K)*3+k],Rcolor3 = midP[(K)*11*i+(K)*4+k],Rcolor4 = midP[(K)*11*i+(K)*9+k],Rcolor5 = midP[(K)*11*i+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
// CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
if (LDist<min(max(ring[i*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*i+k]*weight1;
}
}
if(i<N-1)
for (int k=0;k<spnum[i+1];k++)
{
Rlocation1 = midP[(K)*11*(i+1)+k],Rlocation2 = midP[(K)*11*(i+1)+(K)*1+k],Rcolor1 = midP[(K)*11*(i+1)+(K)*2+k], Rcolor2 = midP[(K)*11*(i+1)+(K)*3+k],Rcolor3 = midP[(K)*11*(i+1)+(K)*4+k],Rcolor4 = midP[(K)*11*(i+1)+(K)*9+k],Rcolor5 = midP[(K)*11*(i+1)+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
//CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
if (LDist<min(max(ring[(i+1)*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*(i+1)+k]*weight1;
}
}
if (i>0)
for (int k=0;k<spnum[i-1];k++)
{
Rlocation1 = midP[(K)*11*(i-1)+k],Rlocation2 = midP[(K)*11*(i-1)+(K)*1+k],Rcolor1 = midP[(K)*11*(i-1)+(K)*2+k], Rcolor2 = midP[(K)*11*(i-1)+(K)*3+k],Rcolor3 = midP[(K)*11*(i-1)+(K)*4+k],Rcolor4 = midP[(K)*11*(i-1)+(K)*9+k],Rcolor5 = midP[(K)*11*(i-1)+(K)*10+k];
LDist=abs(Llocation1-Rlocation1)+abs(Llocation2-Rlocation2);
CDist=sqrt((Lcolor1-Rcolor1)*(Lcolor1-Rcolor1)+(Lcolor3-Rcolor3)*(Lcolor3-Rcolor3)+(Lcolor2-Rcolor2)*(Lcolor2-Rcolor2)+(Lcolor4-Rcolor4)*(Lcolor4-Rcolor4)+(Lcolor5-Rcolor5)*(Lcolor5-Rcolor5));
//CDist=(abs(Lcolor1-Rcolor1)+abs(Lcolor3-Rcolor3)+abs(Lcolor2-Rcolor2)+abs(Lcolor4-Rcolor4)+abs(Lcolor5-Rcolor5));
if (LDist<min(max(ring[(i-1)*K+k],*Par1),*Par2))
{
weight1=exp(-CDist*(*Par));
Tweight+=weight1;
MSS=MSS+MSaliencyM[K*(i-1)+k]*weight1;
}
}
MSS=MSS/Tweight;
FinS[K*i+j]=MSS;
return;
}
void Final_smooth(double *FinS,double* midP,double * ring,double *MSaliencyM,double *K1,double *N1,double *Par,double *spnum,double *Par1,double *Par2)
{
double * dev_FinS;
double *dev_mid,*dev_ring,*dev_MSaliencyM;
double *dev_K1,*dev_N1, *dev_Par,*dev_spnum,*dev_Par1,*dev_Par2;
int K=(int)(*K1),N=(int)(*N1);
cudaMalloc((void **)&dev_mid, sizeof(double)* (K) * 11 * N);
cudaMalloc((void **)&dev_ring, sizeof(double)* K * N);
cudaMalloc((void **)&dev_MSaliencyM, sizeof(double)* K * N);
cudaMalloc((void **)&dev_FinS, sizeof(double)* K * N);
cudaMalloc((void **)&dev_K1, sizeof(double));
cudaMalloc((void **)&dev_N1, sizeof(double));
cudaMalloc((void **)&dev_Par, sizeof(double));
cudaMalloc((void **)&dev_Par1, sizeof(double));
cudaMalloc((void **)&dev_Par2, sizeof(double));
cudaMalloc((void **)&dev_spnum, sizeof(double)*N);
cudaMemcpy(dev_K1, K1, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_N1, N1, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_mid, midP, sizeof(double)* (K) * 11 * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_ring, ring, sizeof(double)* K*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_MSaliencyM, MSaliencyM, sizeof(double)* K*N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_Par, Par, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Par1, Par1, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_Par2, Par2, sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_spnum, spnum, sizeof(double)*N, cudaMemcpyHostToDevice);
dim3 threads(K);
dim3 grids(N);
smooth_kernel << <grids, threads >> >(dev_FinS,dev_mid,dev_ring,dev_MSaliencyM,dev_K1,dev_N1,dev_Par,dev_spnum,dev_Par1,dev_Par2);
cudaMemcpy(FinS, dev_FinS, sizeof(double)*K*N, cudaMemcpyDeviceToHost);
cudaFree(dev_mid);
cudaFree(dev_FinS);
cudaFree(dev_MSaliencyM);
cudaFree(dev_ring);
cudaFree(dev_K1);
cudaFree(dev_N1);
cudaFree(dev_Par);
cudaFree(dev_spnum);
cudaFree(dev_Par1);
cudaFree(dev_Par2);
}
|
3a744dd92226c4dada520bc7e0c9d50008691572.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <time.h>
#include "histogram.h"
#include "assist.h"
#define TIMER_CREATE(t) \
hipEvent_t t##_start, t##_end; \
hipEventCreate(&t##_start); \
hipEventCreate(&t##_end);
#define TIMER_START(t) \
hipEventRecord(t##_start); \
hipEventSynchronize(t##_start);
#define TIMER_END(t) \
hipEventRecord(t##_end); \
hipEventSynchronize(t##_end); \
hipEventElapsedTime(&t, t##_start, t##_end); \
hipEventDestroy(t##_start); \
hipEventDestroy(t##_end);
#define TILE_SIZE 512
#define INTENSITY_RANGE 256
/* Switches for timing measurement */
#define CUDA_TIMING
#define CPU_SWITCH
unsigned char *input_gpu;
unsigned char *output_gpu;
/* Warm up kernel */
__global__ void kernel(unsigned char *input,
unsigned char *output)
{
int location = blockIdx.x * TILE_SIZE + threadIdx.x;
output[location] = location % 255;
}
/* Processing GPU kernel */
__global__ void count_intensity(unsigned int *input,
unsigned int size,
unsigned int *intensity_num)
{
unsigned int location = blockIdx.x * TILE_SIZE + threadIdx.x;
if (location < (size >> 2))
{
        // Each 32-bit word holds four packed 8-bit intensities; count each byte separately
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 24)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 16)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 8)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] & 0x000000FF)], 1);
}
}
__global__ void prefixSum(unsigned int *intensity_num,
unsigned char *min_index)
{
for (int i = 1; i < INTENSITY_RANGE; ++i)
{
intensity_num[i] += intensity_num[i - 1];
if (intensity_num[i] < intensity_num[i - 1])
{
*min_index = i;
}
}
}
__global__ void probability(unsigned int *intensity_num,
double *intensity_pro,
unsigned int size,
unsigned char *min_index)
{
unsigned int index = threadIdx.x;
if (index < INTENSITY_RANGE)
{
intensity_pro[index] = ((double)(intensity_num[index] - intensity_num[*min_index])) / (size - intensity_num[*min_index]);
}
}
__global__ void histo_equalized(unsigned char *input,
unsigned int size,
double *intensity_pro,
unsigned char *output)
{
unsigned int location = blockIdx.x * TILE_SIZE + threadIdx.x;
if (location < size)
{
output[location] = (unsigned char)((INTENSITY_RANGE - 1) *
intensity_pro[input[location]]);
}
}
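/* Histogram equalization on the GPU: count per-intensity occurrences, build the
   cumulative distribution with a prefix sum, convert it to per-intensity
   probabilities, then remap every pixel through the resulting mapping. */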
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width)
{
/* Both are the same size (CPU/GPU). */
int size = width * height;
int gridSize = 1 + ((size - 1) / TILE_SIZE);
unsigned int *intensity_num;
double *intensity_pro;
unsigned char *min_index;
checkCuda(hipMalloc((void **)&input_gpu, size * sizeof(unsigned char)));
checkCuda(hipMalloc((void **)&output_gpu, size * sizeof(unsigned char)));
checkCuda(hipMalloc((void **)&intensity_num, INTENSITY_RANGE * sizeof(unsigned int)));
checkCuda(hipMalloc((void **)&intensity_pro, INTENSITY_RANGE * sizeof(double)));
checkCuda(hipMalloc((void **)&min_index, 1 * sizeof(double)));
/* Copy data to GPU */
checkCuda(hipMemcpy(input_gpu,
data,
size * sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipMemset(intensity_num, 0, INTENSITY_RANGE * sizeof(unsigned int)));
checkCuda(hipMemset(min_index, 0, 1 * sizeof(unsigned int)));
checkCuda(hipDeviceSynchronize());
/* Execute algorithm */
dim3 dimGrid(gridSize);
dim3 dimBlock(TILE_SIZE);
/* Kernel Call */
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
hipLaunchKernelGGL(( count_intensity), dim3(dimGrid), dim3(dimBlock), 0, 0, (unsigned int *)input_gpu,
size,
intensity_num);
hipLaunchKernelGGL(( prefixSum), dim3(1), dim3(1), 0, 0, intensity_num, min_index);
hipLaunchKernelGGL(( probability), dim3(1), dim3(INTENSITY_RANGE), 0, 0, intensity_num, intensity_pro, size, min_index);
hipLaunchKernelGGL(( histo_equalized), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu, size, intensity_pro, output_gpu);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
/* Retrieve results from the GPU */
checkCuda(hipMemcpy(data,
output_gpu,
size * sizeof(unsigned char),
hipMemcpyDeviceToHost));
/* Free resources and end the program */
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
checkCuda(hipFree(intensity_num));
checkCuda(hipFree(intensity_pro));
checkCuda(hipFree(min_index));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width)
{
/* Both are the same size (CPU/GPU). */
int size = height * width;
int gridSize = 1 + ((size - 1) / TILE_SIZE);
/* Allocate arrays in GPU memory */
checkCuda(hipMalloc((void **)&input_gpu, size * sizeof(unsigned char)));
checkCuda(hipMalloc((void **)&output_gpu, size * sizeof(unsigned char)));
checkCuda(hipMemset(output_gpu, 0, size * sizeof(unsigned char)));
/* Copy data to GPU */
checkCuda(hipMemcpy(input_gpu,
data,
size * sizeof(char),
hipMemcpyHostToDevice));
checkCuda(hipDeviceSynchronize());
/* Execute algorithm */
dim3 dimGrid(gridSize);
dim3 dimBlock(TILE_SIZE);
hipLaunchKernelGGL(( kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, input_gpu,
output_gpu);
checkCuda(hipDeviceSynchronize());
/* Retrieve results from the GPU */
checkCuda(hipMemcpy(data,
output_gpu,
size * sizeof(unsigned char),
hipMemcpyDeviceToHost));
/* Free resources and end the program */
checkCuda(hipFree(output_gpu));
checkCuda(hipFree(input_gpu));
}
| 3a744dd92226c4dada520bc7e0c9d50008691572.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <time.h>
#include "histogram.h"
#include "assist.h"
#define TIMER_CREATE(t) \
cudaEvent_t t##_start, t##_end; \
cudaEventCreate(&t##_start); \
cudaEventCreate(&t##_end);
#define TIMER_START(t) \
cudaEventRecord(t##_start); \
cudaEventSynchronize(t##_start);
#define TIMER_END(t) \
cudaEventRecord(t##_end); \
cudaEventSynchronize(t##_end); \
cudaEventElapsedTime(&t, t##_start, t##_end); \
cudaEventDestroy(t##_start); \
cudaEventDestroy(t##_end);
#define TILE_SIZE 512
#define INTENSITY_RANGE 256
/* Switches for timing measurement */
#define CUDA_TIMING
#define CPU_SWITCH
unsigned char *input_gpu;
unsigned char *output_gpu;
/* Warm up kernel */
__global__ void kernel(unsigned char *input,
unsigned char *output)
{
int location = blockIdx.x * TILE_SIZE + threadIdx.x;
output[location] = location % 255;
}
/* Processing GPU kernel */
__global__ void count_intensity(unsigned int *input,
unsigned int size,
unsigned int *intensity_num)
{
unsigned int location = blockIdx.x * TILE_SIZE + threadIdx.x;
if (location < (size >> 2))
{
        // Each 32-bit word holds four packed 8-bit intensities; count each byte separately
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 24)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 16)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] >> 8)], 1);
        atomicAdd(&intensity_num[(unsigned char)(input[location] & 0x000000FF)], 1);
}
}
__global__ void prefixSum(unsigned int *intensity_num,
unsigned char *min_index)
{
for (int i = 1; i < INTENSITY_RANGE; ++i)
{
intensity_num[i] += intensity_num[i - 1];
if (intensity_num[i] < intensity_num[i - 1])
{
*min_index = i;
}
}
}
__global__ void probability(unsigned int *intensity_num,
double *intensity_pro,
unsigned int size,
unsigned char *min_index)
{
unsigned int index = threadIdx.x;
if (index < INTENSITY_RANGE)
{
intensity_pro[index] = ((double)(intensity_num[index] - intensity_num[*min_index])) / (size - intensity_num[*min_index]);
}
}
__global__ void histo_equalized(unsigned char *input,
unsigned int size,
double *intensity_pro,
unsigned char *output)
{
unsigned int location = blockIdx.x * TILE_SIZE + threadIdx.x;
if (location < size)
{
output[location] = (unsigned char)((INTENSITY_RANGE - 1) *
intensity_pro[input[location]]);
}
}
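/* Histogram equalization on the GPU: count per-intensity occurrences, build the
   cumulative distribution with a prefix sum, convert it to per-intensity
   probabilities, then remap every pixel through the resulting mapping. */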
void histogram_gpu(unsigned char *data,
unsigned int height,
unsigned int width)
{
/* Both are the same size (CPU/GPU). */
int size = width * height;
int gridSize = 1 + ((size - 1) / TILE_SIZE);
unsigned int *intensity_num;
double *intensity_pro;
unsigned char *min_index;
checkCuda(cudaMalloc((void **)&input_gpu, size * sizeof(unsigned char)));
checkCuda(cudaMalloc((void **)&output_gpu, size * sizeof(unsigned char)));
checkCuda(cudaMalloc((void **)&intensity_num, INTENSITY_RANGE * sizeof(unsigned int)));
checkCuda(cudaMalloc((void **)&intensity_pro, INTENSITY_RANGE * sizeof(double)));
checkCuda(cudaMalloc((void **)&min_index, 1 * sizeof(double)));
/* Copy data to GPU */
checkCuda(cudaMemcpy(input_gpu,
data,
size * sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaMemset(intensity_num, 0, INTENSITY_RANGE * sizeof(unsigned int)));
checkCuda(cudaMemset(min_index, 0, 1 * sizeof(unsigned int)));
checkCuda(cudaDeviceSynchronize());
/* Execute algorithm */
dim3 dimGrid(gridSize);
dim3 dimBlock(TILE_SIZE);
/* Kernel Call */
#if defined(CUDA_TIMING)
float Ktime;
TIMER_CREATE(Ktime);
TIMER_START(Ktime);
#endif
count_intensity<<<dimGrid, dimBlock>>>((unsigned int *)input_gpu,
size,
intensity_num);
prefixSum<<<1, 1>>>(intensity_num, min_index);
probability<<<1, INTENSITY_RANGE>>>(intensity_num, intensity_pro, size, min_index);
histo_equalized<<<dimGrid, dimBlock>>>(input_gpu, size, intensity_pro, output_gpu);
#if defined(CUDA_TIMING)
TIMER_END(Ktime);
printf("Kernel Execution Time: %f ms\n", Ktime);
#endif
/* Retrieve results from the GPU */
checkCuda(cudaMemcpy(data,
output_gpu,
size * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
/* Free resources and end the program */
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
checkCuda(cudaFree(intensity_num));
checkCuda(cudaFree(intensity_pro));
checkCuda(cudaFree(min_index));
}
void histogram_gpu_warmup(unsigned char *data,
unsigned int height,
unsigned int width)
{
/* Both are the same size (CPU/GPU). */
int size = height * width;
int gridSize = 1 + ((size - 1) / TILE_SIZE);
/* Allocate arrays in GPU memory */
checkCuda(cudaMalloc((void **)&input_gpu, size * sizeof(unsigned char)));
checkCuda(cudaMalloc((void **)&output_gpu, size * sizeof(unsigned char)));
checkCuda(cudaMemset(output_gpu, 0, size * sizeof(unsigned char)));
/* Copy data to GPU */
checkCuda(cudaMemcpy(input_gpu,
data,
size * sizeof(char),
cudaMemcpyHostToDevice));
checkCuda(cudaDeviceSynchronize());
/* Execute algorithm */
dim3 dimGrid(gridSize);
dim3 dimBlock(TILE_SIZE);
kernel<<<dimGrid, dimBlock>>>(input_gpu,
output_gpu);
checkCuda(cudaDeviceSynchronize());
/* Retrieve results from the GPU */
checkCuda(cudaMemcpy(data,
output_gpu,
size * sizeof(unsigned char),
cudaMemcpyDeviceToHost));
/* Free resources and end the program */
checkCuda(cudaFree(output_gpu));
checkCuda(cudaFree(input_gpu));
}
|
71cf48e82f76050b2533e7166102729f3b8bcc68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define BDIMX 32
#define BDIMY 16
dim3 block (BDIMX, BDIMY);
dim3 grid (1,1);
__global__ void setRowReadRow(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.y][threadIdx.x] ;
}
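// Column-major store/load: consecutive threads of a warp access shared-memory
// words 16 apart (tile is [32][16] ints), so with 32 four-byte banks every
// access is a 16-way bank conflict, unlike the row-major kernel above.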
__global__ void setColReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMX][BDIMY];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.x][threadIdx.y] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from 2D thread index to linear memory
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// convert idx to transposed coordinate (row, col)
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[icol][irow];
}
__global__ void setRowReadColDyn(int *out)
{
// dynamic shared memory
extern __shared__ int tile[];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// convert idx to transposed (row, col)
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
// convert back to smem idx to access the transposed element
unsigned int col_idx = icol * blockDim.x + irow;
// shared memory store operation
tile[idx] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[col_idx];
}
int main()
{
int *c;
c = (int*)malloc(BDIMX * BDIMY * sizeof(int));
int *d_C;
hipMalloc(&d_C, BDIMX * BDIMY * sizeof(int));
hipLaunchKernelGGL(( setRowReadRow), dim3(grid), dim3(block), BDIMX * BDIMY * sizeof(int), 0, d_C);
hipDeviceSynchronize();
hipMemcpy(c, d_C, BDIMX * BDIMY * sizeof(int), hipMemcpyDeviceToHost);
for (int y = 0; y < BDIMY; y++)
{
printf("[ ");
for (int x = 0; x < BDIMX; x++)
printf("% 4d ", c[y * BDIMX + x]);
printf("]\n");
}
hipFree(d_C);
free(c);
return 0;
} | 71cf48e82f76050b2533e7166102729f3b8bcc68.cu | #include <stdio.h>
#define BDIMX 32
#define BDIMY 16
dim3 block (BDIMX, BDIMY);
dim3 grid (1,1);
__global__ void setRowReadRow(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.y][threadIdx.x] ;
}
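// Column-major store/load: consecutive threads of a warp access shared-memory
// words 16 apart (tile is [32][16] ints), so with 32 four-byte banks every
// access is a 16-way bank conflict, unlike the row-major kernel above.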
__global__ void setColReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMX][BDIMY];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// shared memory store operation
tile[threadIdx.x][threadIdx.y] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[threadIdx.x][threadIdx.y];
}
__global__ void setRowReadCol(int *out)
{
// static shared memory
__shared__ int tile[BDIMY][BDIMX];
// mapping from 2D thread index to linear memory
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// convert idx to transposed coordinate (row, col)
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
// shared memory store operation
tile[threadIdx.y][threadIdx.x] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[icol][irow];
}
__global__ void setRowReadColDyn(int *out)
{
// dynamic shared memory
extern __shared__ int tile[];
// mapping from thread index to global memory index
unsigned int idx = threadIdx.y * blockDim.x + threadIdx.x;
// convert idx to transposed (row, col)
unsigned int irow = idx / blockDim.y;
unsigned int icol = idx % blockDim.y;
// convert back to smem idx to access the transposed element
unsigned int col_idx = icol * blockDim.x + irow;
// shared memory store operation
tile[idx] = idx;
// wait for all threads to complete
__syncthreads();
// shared memory load operation
out[idx] = tile[col_idx];
}
int main()
{
int *c;
c = (int*)malloc(BDIMX * BDIMY * sizeof(int));
int *d_C;
cudaMalloc(&d_C, BDIMX * BDIMY * sizeof(int));
setRowReadRow<<<grid, block, BDIMX * BDIMY * sizeof(int)>>>(d_C);
cudaDeviceSynchronize();
cudaMemcpy(c, d_C, BDIMX * BDIMY * sizeof(int), cudaMemcpyDeviceToHost);
for (int y = 0; y < BDIMY; y++)
{
printf("[ ");
for (int x = 0; x < BDIMX; x++)
printf("% 4d ", c[y * BDIMX + x]);
printf("]\n");
}
cudaFree(d_C);
free(c);
return 0;
} |
cd3f150e75b6926e68b4c956613fcbce21ff2a8d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
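// Decodes one bounding-box coordinate per thread: index % 4 selects the
// coordinate, then the location class and the prior box; prior_data holds the
// prior at offset d*4 and its four variances num_priors*4 further along.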
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
        // variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DecodeBBoxesKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( PermuteDataKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_max<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_subtract<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_exp<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_sum<Dtype>), dim3(CAFFE_GET_BLOCKS(outer_num * inner_num)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( kernel_channel_div<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, outer_num, channels, inner_num,
scale_data, prob);
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
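// One thread per (n, c, i, j) bbox pair; entries are only ever set to true,
// so overlapped_data is expected to arrive zero-initialized.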
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeOverlappedByIdxKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep only the detections whose scores are higher than the confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
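// Each detection occupies 7 consecutive values:
// [image_id, label, confidence, xmin, ymin, xmax, ymax].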
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( GetDetectionsKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_det)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
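// One-vs-all sigmoid cross-entropy, summed over all classes and written in a
// numerically stable form that avoids overflow in exp() for large |input|.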
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
Dtype input = conf_data[start_idx + c];
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( ComputeConfLossKernel<Dtype>), dim3(CAFFE_GET_BLOCKS(num_threads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit initialization.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
| cd3f150e75b6926e68b4c956613fcbce21ff2a8d.cu | #include <algorithm>
#include <functional>
#include <map>
#include <vector>
#include "thrust/functional.h"
#include "thrust/sort.h"
#include "caffe/common.hpp"
#include "bbox_util.hpp"
namespace caffe {
template <typename Dtype>
__host__ __device__ Dtype BBoxSizeGPU(const Dtype* bbox,
const bool normalized) {
if (bbox[2] < bbox[0] || bbox[3] < bbox[1]) {
// If bbox is invalid (e.g. xmax < xmin or ymax < ymin), return 0.
return Dtype(0.);
} else {
const Dtype width = bbox[2] - bbox[0];
const Dtype height = bbox[3] - bbox[1];
if (normalized) {
return width * height;
} else {
// If bbox is not within range [0, 1].
return (width + 1) * (height + 1);
}
}
}
template __host__ __device__ float BBoxSizeGPU(const float* bbox,
const bool normalized);
template __host__ __device__ double BBoxSizeGPU(const double* bbox,
const bool normalized);
template <typename Dtype>
__host__ __device__ Dtype JaccardOverlapGPU(const Dtype* bbox1,
const Dtype* bbox2) {
if (bbox2[0] > bbox1[2] || bbox2[2] < bbox1[0] ||
bbox2[1] > bbox1[3] || bbox2[3] < bbox1[1]) {
return Dtype(0.);
} else {
const Dtype inter_xmin = max(bbox1[0], bbox2[0]);
const Dtype inter_ymin = max(bbox1[1], bbox2[1]);
const Dtype inter_xmax = min(bbox1[2], bbox2[2]);
const Dtype inter_ymax = min(bbox1[3], bbox2[3]);
const Dtype inter_width = inter_xmax - inter_xmin;
const Dtype inter_height = inter_ymax - inter_ymin;
const Dtype inter_size = inter_width * inter_height;
const Dtype bbox1_size = BBoxSizeGPU(bbox1);
const Dtype bbox2_size = BBoxSizeGPU(bbox2);
return inter_size / (bbox1_size + bbox2_size - inter_size);
}
}
template __host__ __device__ float JaccardOverlapGPU(const float* bbox1,
const float* bbox2);
template __host__ __device__ double JaccardOverlapGPU(const double* bbox1,
const double* bbox2);
template <typename Dtype>
__device__ Dtype Min(const Dtype x, const Dtype y) {
return x < y ? x : y;
}
template <typename Dtype>
__device__ Dtype Max(const Dtype x, const Dtype y) {
return x > y ? x : y;
}
template <typename Dtype>
__device__ void ClipBBoxGPU(const Dtype* bbox, Dtype* clip_bbox) {
for (int i = 0; i < 4; ++i) {
clip_bbox[i] = Max(Min(bbox[i], Dtype(1.)), Dtype(0.));
}
}
template __device__ void ClipBBoxGPU(const float* bbox, float* clip_bbox);
template __device__ void ClipBBoxGPU(const double* bbox, double* clip_bbox);
template <typename Dtype>
__global__ void DecodeBBoxesKernel(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % 4;
const int c = (index / 4) % num_loc_classes;
const int d = (index / 4 / num_loc_classes) % num_priors;
if (!share_location && c == background_label_id) {
// Ignore background class if not share_location.
return;
}
const int pi = d * 4;
const int vi = pi + num_priors * 4;
if (code_type == PriorBoxParameter_CodeType_CORNER) {
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index];
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i];
}
} else if (code_type == PriorBoxParameter_CodeType_CENTER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
const Dtype prior_center_x = (p_xmin + p_xmax) / 2.;
const Dtype prior_center_y = (p_ymin + p_ymax) / 2.;
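// index - i points at the first of the four consecutive loc entries of this prediction;
// despite the xmin/ymin/xmax/ymax names, in CENTER_SIZE coding these hold the encoded
// (center_x, center_y, width, height) offsets.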
const Dtype xmin = loc_data[index - i];
const Dtype ymin = loc_data[index - i + 1];
const Dtype xmax = loc_data[index - i + 2];
const Dtype ymax = loc_data[index - i + 3];
Dtype decode_bbox_center_x, decode_bbox_center_y;
Dtype decode_bbox_width, decode_bbox_height;
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to restore the offset
// predictions.
decode_bbox_center_x = xmin * prior_width + prior_center_x;
decode_bbox_center_y = ymin * prior_height + prior_center_y;
decode_bbox_width = exp(xmax) * prior_width;
decode_bbox_height = exp(ymax) * prior_height;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
decode_bbox_center_x =
prior_data[vi] * xmin * prior_width + prior_center_x;
decode_bbox_center_y =
prior_data[vi + 1] * ymin * prior_height + prior_center_y;
decode_bbox_width =
exp(prior_data[vi + 2] * xmax) * prior_width;
decode_bbox_height =
exp(prior_data[vi + 3] * ymax) * prior_height;
}
switch (i) {
case 0:
bbox_data[index] = decode_bbox_center_x - decode_bbox_width / 2.;
break;
case 1:
bbox_data[index] = decode_bbox_center_y - decode_bbox_height / 2.;
break;
case 2:
bbox_data[index] = decode_bbox_center_x + decode_bbox_width / 2.;
break;
case 3:
bbox_data[index] = decode_bbox_center_y + decode_bbox_height / 2.;
break;
}
} else if (code_type == PriorBoxParameter_CodeType_CORNER_SIZE) {
const Dtype p_xmin = prior_data[pi];
const Dtype p_ymin = prior_data[pi + 1];
const Dtype p_xmax = prior_data[pi + 2];
const Dtype p_ymax = prior_data[pi + 3];
const Dtype prior_width = p_xmax - p_xmin;
const Dtype prior_height = p_ymax - p_ymin;
Dtype p_size;
if (i == 0 || i == 2) {
p_size = prior_width;
} else {
p_size = prior_height;
}
if (variance_encoded_in_target) {
// variance is encoded in target, we simply need to add the offset
// predictions.
bbox_data[index] = prior_data[pi + i] + loc_data[index] * p_size;
} else {
// variance is encoded in bbox, we need to scale the offset accordingly.
bbox_data[index] =
prior_data[pi + i] + loc_data[index] * prior_data[vi + i] * p_size;
}
} else {
// Unknown code type.
}
if (clip_bbox) {
bbox_data[index] = max(min(bbox_data[index], Dtype(1.)), Dtype(0.));
}
}
}
template <typename Dtype>
void DecodeBBoxesGPU(const int nthreads,
const Dtype* loc_data, const Dtype* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, Dtype* bbox_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
DecodeBBoxesKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, loc_data, prior_data, code_type,
variance_encoded_in_target, num_priors, share_location, num_loc_classes,
background_label_id, clip_bbox, bbox_data);
CUDA_POST_KERNEL_CHECK;
}
template void DecodeBBoxesGPU(const int nthreads,
const float* loc_data, const float* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, float* bbox_data);
template void DecodeBBoxesGPU(const int nthreads,
const double* loc_data, const double* prior_data,
const CodeType code_type, const bool variance_encoded_in_target,
const int num_priors, const bool share_location,
const int num_loc_classes, const int background_label_id,
const bool clip_bbox, double* bbox_data);
template <typename Dtype>
__global__ void PermuteDataKernel(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int i = index % num_dim;
const int c = (index / num_dim) % num_classes;
const int d = (index / num_dim / num_classes) % num_data;
const int n = index / num_dim / num_classes / num_data;
const int new_index = ((n * num_classes + c) * num_data + d) * num_dim + i;
new_data[new_index] = data[index];
}
}
template <typename Dtype>
void PermuteDataGPU(const int nthreads,
const Dtype* data, const int num_classes, const int num_data,
const int num_dim, Dtype* new_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
PermuteDataKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, data, num_classes, num_data,
num_dim, new_data);
CUDA_POST_KERNEL_CHECK;
}
template void PermuteDataGPU(const int nthreads,
const float* data, const int num_classes, const int num_data,
const int num_dim, float* new_data);
template void PermuteDataGPU(const int nthreads,
const double* data, const int num_classes, const int num_data,
const int num_dim, double* new_data);
template <typename Dtype>
__global__ void kernel_channel_max(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype maxval = -FLT_MAX;
for (int c = 0; c < channels; ++c) {
maxval = max(data[(n * channels + c) * spatial_dim + s], maxval);
}
out[index] = maxval;
}
}
template <typename Dtype>
__global__ void kernel_channel_subtract(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_data, const Dtype* channel_max,
Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] = channel_data[index] - channel_max[n * spatial_dim + s];
}
}
template <typename Dtype>
__global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) {
CUDA_KERNEL_LOOP(index, count) {
out[index] = exp(data[index]);
}
}
template <typename Dtype>
__global__ void kernel_channel_sum(const int num, const int channels,
const int spatial_dim, const Dtype* data, Dtype* channel_sum) {
CUDA_KERNEL_LOOP(index, num * spatial_dim) {
int n = index / spatial_dim;
int s = index % spatial_dim;
Dtype sum = 0;
for (int c = 0; c < channels; ++c) {
sum += data[(n * channels + c) * spatial_dim + s];
}
channel_sum[index] = sum;
}
}
template <typename Dtype>
__global__ void kernel_channel_div(const int count,
const int num, const int channels,
const int spatial_dim, const Dtype* channel_sum, Dtype* data) {
CUDA_KERNEL_LOOP(index, count) {
int n = index / channels / spatial_dim;
int s = index % spatial_dim;
data[index] /= channel_sum[n * spatial_dim + s];
}
}
template <typename Dtype>
void SoftMaxGPU(const Dtype* data, const int outer_num,
const int channels, const int inner_num, Dtype* prob) {
vector<int> shape(4, 1);
shape[0] = outer_num;
shape[1] = channels;
shape[2] = inner_num;
Blob<Dtype> scale(shape);
Dtype* scale_data = scale.mutable_gpu_data();
int count = outer_num * channels * inner_num;
// We need to subtract the max to avoid numerical issues, compute the exp,
// and then normalize.
// compute max
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_max<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, data,
scale_data);
// subtract
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_subtract<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
data, scale_data, prob);
// exponentiate
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_exp<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, prob, prob);
// sum after exp
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_sum<Dtype><<<CAFFE_GET_BLOCKS(outer_num * inner_num),
CAFFE_CUDA_NUM_THREADS>>>(outer_num, channels, inner_num, prob,
scale_data);
// divide
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_div<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(count, outer_num, channels, inner_num,
scale_data, prob);
}
template void SoftMaxGPU(const float* data, const int outer_num,
const int channels, const int inner_num, float* prob);
template void SoftMaxGPU(const double* data, const int outer_num,
const int channels, const int inner_num, double* prob);
template <typename Dtype>
__global__ void ComputeOverlappedKernel(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
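// One thread per (n, c, i, j) bbox pair; entries are only ever set to true,
// so overlapped_data is expected to arrive zero-initialized.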
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_bboxes;
const int i = (index / num_bboxes) % num_bboxes;
if (i == j) {
// Ignore same bbox.
return;
}
const int c = (index / num_bboxes / num_bboxes) % num_classes;
const int n = index / num_bboxes / num_bboxes / num_classes;
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = ((n * num_bboxes + i) * num_classes + c) * 4;
const int start_loc_j = ((n * num_bboxes + j) * num_classes + c) * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedGPU(const int nthreads,
const Dtype* bbox_data, const int num_bboxes, const int num_classes,
const Dtype overlap_threshold, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, num_bboxes, num_classes,
overlap_threshold, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedGPU(const int nthreads,
const float* bbox_data, const int num_bboxes, const int num_classes,
const float overlap_threshold, bool* overlapped_data);
template void ComputeOverlappedGPU(const int nthreads,
const double* bbox_data, const int num_bboxes, const int num_classes,
const double overlap_threshold, bool* overlapped_data);
template <typename Dtype>
__global__ void ComputeOverlappedByIdxKernel(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int j = index % num_idx;
const int i = (index / num_idx);
if (i == j) {
// Ignore same bbox.
return;
}
// Compute overlap between i-th bbox and j-th bbox.
const int start_loc_i = idx[i] * 4;
const int start_loc_j = idx[j] * 4;
const Dtype overlap = JaccardOverlapGPU<Dtype>(bbox_data + start_loc_i,
bbox_data + start_loc_j);
if (overlap > overlap_threshold) {
overlapped_data[index] = true;
}
}
}
template <typename Dtype>
void ComputeOverlappedByIdxGPU(const int nthreads,
const Dtype* bbox_data, const Dtype overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data) {
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeOverlappedByIdxKernel<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, bbox_data, overlap_threshold,
idx, num_idx, overlapped_data);
CUDA_POST_KERNEL_CHECK;
}
template void ComputeOverlappedByIdxGPU(const int nthreads,
const float* bbox_data, const float overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template void ComputeOverlappedByIdxGPU(const int nthreads,
const double* bbox_data, const double overlap_threshold,
const int* idx, const int num_idx, bool* overlapped_data);
template <typename Dtype>
void ApplyNMSGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices) {
// Keep only the detections whose scores are higher than the confidence threshold.
vector<int> idx;
vector<Dtype> confidences;
for (int i = 0; i < num_bboxes; ++i) {
if (conf_data[i] > confidence_threshold) {
idx.push_back(i);
confidences.push_back(conf_data[i]);
}
}
int num_remain = confidences.size();
if (num_remain == 0) {
return;
}
// Sort detections based on score.
thrust::sort_by_key(&confidences[0], &confidences[0] + num_remain, &idx[0],
thrust::greater<Dtype>());
if (top_k > -1 && top_k < num_remain) {
num_remain = top_k;
}
// Compute overlap between remaining detections.
Blob<int> idx_blob(1, 1, 1, num_remain);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(idx.begin(), idx.begin() + num_remain, idx_data);
Blob<bool> overlapped(1, 1, num_remain, num_remain);
const int total_bboxes = overlapped.count();
bool* overlapped_data = overlapped.mutable_gpu_data();
ComputeOverlappedByIdxGPU<Dtype>(total_bboxes, bbox_data, nms_threshold,
idx_blob.gpu_data(), num_remain, overlapped_data);
// Do non-maximum suppression based on overlapped results.
const bool* overlapped_results = overlapped.cpu_data();
vector<int> selected_indices;
ApplyNMS(overlapped_results, num_remain, &selected_indices);
// Put back the selected information.
for (int i = 0; i < selected_indices.size(); ++i) {
indices->push_back(idx[selected_indices[i]]);
}
}
template
void ApplyNMSGPU(const float* bbox_data, const float* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template
void ApplyNMSGPU(const double* bbox_data, const double* conf_data,
const int num_bboxes, const float confidence_threshold,
const int top_k, const float nms_threshold, vector<int>* indices);
template <typename Dtype>
__global__ void GetDetectionsKernel(const int nthreads,
const Dtype* bbox_data, const Dtype* conf_data, const int image_id,
const int label, const int* indices, const bool clip_bbox,
Dtype* detection_data) {
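// Each detection occupies 7 consecutive values:
// [image_id, label, confidence, xmin, ymin, xmax, ymax].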
CUDA_KERNEL_LOOP(index, nthreads) {
const int det_idx = indices[index];
detection_data[index * 7] = image_id;
detection_data[index * 7 + 1] = label;
detection_data[index * 7 + 2] = conf_data[det_idx];
if (clip_bbox) {
ClipBBoxGPU(&(bbox_data[det_idx * 4]), &(detection_data[index * 7 + 3]));
} else {
for (int i = 0; i < 4; ++i) {
detection_data[index * 7 + 3 + i] = bbox_data[det_idx * 4 + i];
}
}
}
}
template <typename Dtype>
void GetDetectionsGPU(const Dtype* bbox_data, const Dtype* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<Dtype>* detection_blob) {
// Store selected indices in array.
int num_det = indices.size();
if (num_det == 0) {
return;
}
Blob<int> idx_blob(1, 1, 1, num_det);
int* idx_data = idx_blob.mutable_cpu_data();
std::copy(indices.begin(), indices.end(), idx_data);
// Prepare detection_blob.
detection_blob->Reshape(1, 1, num_det, 7);
Dtype* detection_data = detection_blob->mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
GetDetectionsKernel<Dtype><<<CAFFE_GET_BLOCKS(num_det),
CAFFE_CUDA_NUM_THREADS>>>(num_det, bbox_data, conf_data, image_id, label,
idx_blob.gpu_data(), clip_bbox, detection_data);
CUDA_POST_KERNEL_CHECK;
}
template void GetDetectionsGPU(const float* bbox_data, const float* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<float>* detection_blob);
template void GetDetectionsGPU(const double* bbox_data, const double* conf_data,
const int image_id, const int label, const vector<int>& indices,
const bool clip_bbox, Blob<double>* detection_blob);
template <typename Dtype>
__global__ void ComputeConfLossKernel(const int nthreads,
const Dtype* conf_data, const int num_preds_per_class,
const int num_classes, const ConfLossType loss_type,
const Dtype* match_data, Dtype* conf_loss_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
int label = match_data[index];
int num = index / num_preds_per_class;
int p = index % num_preds_per_class;
int start_idx = (num * num_preds_per_class + p) * num_classes;
Dtype loss = 0;
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
// Compute softmax probability.
Dtype prob = conf_data[start_idx + label];
loss = -log(Max(prob, Dtype(FLT_MIN)));
} else if (loss_type == MultiBoxLossParameter_ConfLossType_LOGISTIC) {
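// One-vs-all sigmoid cross-entropy, summed over all classes and written in a
// numerically stable form that avoids overflow in exp() for large |input|.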
int target = 0;
for (int c = 0; c < num_classes; ++c) {
if (c == label) {
target = 1;
} else {
target = 0;
}
Dtype input = conf_data[start_idx + c];
loss -= input * (target - (input >= 0)) -
log(1 + exp(input - 2 * input * (input >= 0)));
}
}
conf_loss_data[index] = loss;
}
}
template <typename Dtype>
void ComputeConfLossGPU(const Blob<Dtype>& conf_blob, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss) {
CHECK_LT(background_label_id, num_classes);
Blob<Dtype> match_blob(num, num_preds_per_class, 1, 1);
Dtype* match_data = match_blob.mutable_cpu_data();
for (int i = 0; i < num; ++i) {
const map<int, vector<int> >& match_indices = all_match_indices[i];
for (int p = 0; p < num_preds_per_class; ++p) {
// Get the label index.
int label = background_label_id;
for (map<int, vector<int> >::const_iterator it =
match_indices.begin(); it != match_indices.end(); ++it) {
const vector<int>& match_index = it->second;
CHECK_EQ(match_index.size(), num_preds_per_class);
if (match_index[p] > -1) {
CHECK(all_gt_bboxes.find(i) != all_gt_bboxes.end());
const vector<NormalizedBBox>& gt_bboxes =
all_gt_bboxes.find(i)->second;
CHECK_LT(match_index[p], gt_bboxes.size());
label = gt_bboxes[match_index[p]].label();
CHECK_GE(label, 0);
CHECK_NE(label, background_label_id);
CHECK_LT(label, num_classes);
// A prior can only be matched to one gt bbox.
break;
}
}
match_data[i * num_preds_per_class + p] = label;
}
}
// Get probability data.
const Dtype* conf_gpu_data = conf_blob.gpu_data();
Blob<Dtype> prob_blob;
prob_blob.ReshapeLike(conf_blob);
if (loss_type == MultiBoxLossParameter_ConfLossType_SOFTMAX) {
Dtype* prob_gpu_data = prob_blob.mutable_gpu_data();
SoftMaxGPU(conf_blob.gpu_data(), num * num_preds_per_class, num_classes, 1,
prob_gpu_data);
conf_gpu_data = prob_blob.gpu_data();
}
// Compute the loss.
Blob<Dtype> conf_loss_blob(num, num_preds_per_class, 1, 1);
Dtype* conf_loss_gpu_data = conf_loss_blob.mutable_gpu_data();
const int num_threads = num * num_preds_per_class;
// NOLINT_NEXT_LINE(whitespace/operators)
ComputeConfLossKernel<Dtype><<<CAFFE_GET_BLOCKS(num_threads),
CAFFE_CUDA_NUM_THREADS>>>(num_threads, conf_gpu_data, num_preds_per_class,
num_classes, loss_type, match_blob.gpu_data(), conf_loss_gpu_data);
// Save the loss.
all_conf_loss->clear();
const Dtype* loss_data = conf_loss_blob.cpu_data();
for (int i = 0; i < num; ++i) {
vector<float> conf_loss(loss_data, loss_data + num_preds_per_class);
all_conf_loss->push_back(conf_loss);
loss_data += num_preds_per_class;
}
}
// Explicit initialization.
template void ComputeConfLossGPU(const Blob<float>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
template void ComputeConfLossGPU(const Blob<double>& conf_data, const int num,
const int num_preds_per_class, const int num_classes,
const int background_label_id, const ConfLossType loss_type,
const vector<map<int, vector<int> > >& all_match_indices,
const map<int, vector<NormalizedBBox> >& all_gt_bboxes,
vector<vector<float> >* all_conf_loss);
} // namespace caffe
|
78daa93f222a89fb55c7d5d0782f2e3e5f477015.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! MinimumE <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumE(
const int count,
const T* x1,
const T* x2,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = min(x1[idx], x2[idx]);
}
}
template <> void MinimumE<float, CUDAContext>(
const int count,
const float* x1,
const float* x2,
float* y,
CUDAContext* ctx) {
_MinimumE<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, y);
}
/*! MinimumB <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumB(
const int count,
const T* x1,
const T x2,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = min(x1[idx], x2);
}
}
template <> void MinimumB<float, CUDAContext>(
const int count,
const float* x1,
const float x2,
float* y,
CUDAContext* ctx) {
_MinimumB<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, y);
}
/*! MinimumEGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumEGrad(
const int count,
const T* x1,
const T* x2,
const T* dy,
T* dx1,
T* dx2) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const bool dy_to_dx1 = x1[idx] < x2[idx];
dx1[idx] = dy_to_dx1 ? dy[idx] : 0;
dx2[idx] = dy_to_dx1 ? 0 : dy[idx];
}
}
template <> void MinimumEGrad<float, CUDAContext>(
const int count,
const float* x1,
const float* x2,
const float* dy,
float* dx1,
float* dx2,
CUDAContext* ctx) {
_MinimumEGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, dy, dx1, dx2);
}
/*! MinimumBGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumBGrad(
const int count,
const T* x1,
const T x2,
const T* dy,
T* dx1) {
CUDA_1D_KERNEL_LOOP(idx, count) {
dx1[idx] = (x1[idx] < x2) ? dy[idx] : 0;
}
}
template <> void MinimumBGrad<float, CUDAContext>(
const int count,
const float* x1,
const float x2,
const float* dy,
float* dx1,
/* float* dx2, */
CUDAContext* ctx) {
_MinimumBGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, dy, dx1);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA | 78daa93f222a89fb55c7d5d0782f2e3e5f477015.cu | #ifdef WITH_CUDA
#include "core/context_cuda.h"
#include "utils/op_kernel.h"
namespace dragon {
namespace kernel {
/*! MinimumE <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumE(
const int count,
const T* x1,
const T* x2,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = min(x1[idx], x2[idx]);
}
}
template <> void MinimumE<float, CUDAContext>(
const int count,
const float* x1,
const float* x2,
float* y,
CUDAContext* ctx) {
_MinimumE<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, y);
}
/*! MinimumB <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumB(
const int count,
const T* x1,
const T x2,
T* y) {
CUDA_1D_KERNEL_LOOP(idx, count) {
y[idx] = min(x1[idx], x2);
}
}
template <> void MinimumB<float, CUDAContext>(
const int count,
const float* x1,
const float x2,
float* y,
CUDAContext* ctx) {
_MinimumB<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, y);
}
/*! MinimumEGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumEGrad(
const int count,
const T* x1,
const T* x2,
const T* dy,
T* dx1,
T* dx2) {
CUDA_1D_KERNEL_LOOP(idx, count) {
const bool dy_to_dx1 = x1[idx] < x2[idx];
dx1[idx] = dy_to_dx1 ? dy[idx] : 0;
dx2[idx] = dy_to_dx1 ? 0 : dy[idx];
}
}
template <> void MinimumEGrad<float, CUDAContext>(
const int count,
const float* x1,
const float* x2,
const float* dy,
float* dx1,
float* dx2,
CUDAContext* ctx) {
_MinimumEGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, dy, dx1, dx2);
}
/*! MinimumBGrad <T = float32, Device = CUDA> */
template <typename T>
__global__ void _MinimumBGrad(
const int count,
const T* x1,
const T x2,
const T* dy,
T* dx1) {
CUDA_1D_KERNEL_LOOP(idx, count) {
dx1[idx] = (x1[idx] < x2) ? dy[idx] : 0;
}
}
template <> void MinimumBGrad<float, CUDAContext>(
const int count,
const float* x1,
const float x2,
const float* dy,
float* dx1,
/* float* dx2, */
CUDAContext* ctx) {
_MinimumBGrad<float>
<< < CUDA_BLOCKS(count), CUDA_THREADS,
0, ctx->cuda_stream() >> >
(count, x1, x2, dy, dx1);
}
} // namespace kernel
} // namespace dragon
#endif // WITH_CUDA |
9b1e1957c91a21949a84584031aed3a31b0651a5.hip | // !!! This is a file automatically generated by hipify!!!
/*
cudakernels.cu
Developed for the master thesis project: GPU-accelerated Thermodynamic Topology Optimization
Author: Wan Arif bin Wan Abhar
Institution: Ruhr Universitaet Bochum
*/
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <cmath>
#include <ctime>
#include <iostream>
#include "../include/cudakernels.h"
#define CUDA_CALL( call ) \
{ \
hipError_t err = call; \
if ( hipSuccess != err){ \
fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,hipGetErrorString(err));exit(EXIT_FAILURE);}\
}
using namespace std;
// Self-defined double-precision atomicAdd function for NVIDIA GPUs with compute capability below 6.0.
// The built-in double-precision atomicAdd() is only available on CC 6.0 (Pascal) and newer devices,
// which is why the __CUDA_ARCH__ < 600 guards below fall back to this CAS-based implementation.
__device__
double atomicAdd_double(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
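// compare-and-swap loop: retry until no other thread has modified the value
// between our read and our write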
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// determines 1-dimensional CUDA block and grid sizes based on the number of rows N
__host__
void calculateDimensions(size_t N, dim3 &gridDim, dim3 &blockDim)
{
if ( N <= 1024 )
{
blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1;
gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
}
else
{
blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1;
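// N/blockDim.x is integer (truncating) division here, so the +1 guarantees
// enough blocks to cover all N entries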
gridDim.x = (int)ceil(N/blockDim.x)+1; gridDim.y = 1; gridDim.z = 1;
}
}
// determines 2-dimensional CUDA block and grid sizes based on the number of rows N
__host__ void calculateDimensions2D(size_t Nx, size_t Ny, dim3 &gridDim, dim3 &blockDim)
{
if ( Nx <= 32 && Ny <= 32)
{
blockDim.x = 32; blockDim.y = 32; blockDim.z = 1;
gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
}
else
{
blockDim.x = 32; blockDim.y = 32; blockDim.z = 1;
gridDim.x = (int)ceil(Nx/blockDim.x)+1; gridDim.y = (int)ceil(Ny/blockDim.y)+1; gridDim.z = 1;
}
}
// calculates the DOF of a grid with dimensions
__host__ size_t calcDOF(size_t Nx, size_t Ny, size_t dim)
{
return (Nx + 1) * (Ny + 1) * dim;
}
// returns value of an ELLPack matrix A at (x,y)
__device__
double valueAt(size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size)
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[x * max_row_size + k] == y)
return vValue[x * max_row_size + k];
}
return 0.0;
}
// returns value of a transposed ELLPack matrix A at (row,col)
__device__
double valueAt_(size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows)
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + row] == col)
return vValue[k * num_rows + row];
}
return 0.0;
}
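// overwrites an existing entry of a transposed ELLPACK matrix;
// the matrix is left unchanged if the entry is not part of the stored sparsity pattern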
__device__
void setAt( size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows, double value )
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + col] == row)
{
vValue[k * num_rows + col] = value;
k = max_row_size; // to exit for loop
}
}
}
// a[] = 0.0
__global__
void setToZero(double* a, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = 0.0;
}
// a = 1
__global__
void setToOne(double* a)
{
*a = 1;
}
// norm = x.norm()
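// note: __syncthreads() only synchronizes within one block, so the zero-initialization
// and the final sqrt by thread 0 rely on this kernel being launched with a single block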
__global__
void norm_GPU(double* norm, double* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id == 0 )
*norm = 0;
__syncthreads();
if ( id < num_rows )
{
#if __CUDA_ARCH__ < 600
atomicAdd_double( norm, x[id]*x[id] );
#else
atomicAdd( norm, x[id]*x[id] );
#endif
}
__syncthreads();
if ( id == 0 )
*norm = sqrt(*norm);
}
// a[] = 0, size_t
__global__
void setToZero(size_t* a, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = 0.0;
}
// bool = true
__global__
void setToTrue( bool *foo )
{
*foo = true;
}
// x = sqrt(x)
__global__
void sqrt_GPU(double *x)
{
*x = sqrt(*x);
}
// sum = sum( x[n]*x[n] )
__global__
void sumOfSquare_GPU(double* sum, double* x, size_t n)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
double temp = 0.0;
while(id < n)
{
temp += x[id]*x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reset id
id = threadIdx.x + blockDim.x*blockIdx.x;
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__global__
void LastBlockSumOfSquare_GPU(double* sum, double* x, size_t n, size_t counter)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
if ( id >= counter*blockDim.x && id < n )
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, x[id]*x[id]);
#else
atomicAdd(sum, x[id]*x[id]);
#endif
}
}
__host__
void norm_GPU(double* d_norm, double* d_x, size_t N, dim3 gridDim, dim3 blockDim)
{
hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_norm, 1);
// getting the last block's size
size_t lastBlockSize = N;
size_t counter = 0;
if ( N % gridDim.x == 0 ) {}
else
{
while ( lastBlockSize >= gridDim.x)
{
counter++;
lastBlockSize -= gridDim.x;
}
}
// sum of squares for the full blocks
hipLaunchKernelGGL(( sumOfSquare_GPU), dim3(gridDim.x - 1), dim3(blockDim), 0, 0, d_norm, d_x, (gridDim.x - 1)*blockDim.x);
// sum of squares for the last incomplete block
hipLaunchKernelGGL(( LastBlockSumOfSquare_GPU), dim3(1), dim3(lastBlockSize), 0, 0, d_norm, d_x, N, counter);
hipLaunchKernelGGL(( sqrt_GPU), dim3(1),dim3(1), 0, 0, d_norm );
}
//// DEBUG:
//// helper functions for debugging
__global__
void print_GPU(double* x)
{
printf("[GPU] x = %e\n", *x);
}
__global__
void print_GPU(int* x)
{
printf("[GPU] x = %d\n", *x);
}
__global__
void print_GPU(size_t* x)
{
printf("[GPU] x = %lu\n", *x);
}
__global__
void print_GPU(bool* x)
{
printf("[GPU] x = %d\n", *x);
}
__global__ void printLinearVector_GPU(size_t* x, size_t i, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++ )
printf("%lu ", x[j+i*num_cols]);
printf("\n");
}
__global__ void printLinearVector_GPU(double* x, size_t i, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++ )
printf("%g ", x[j+i*num_cols]);
printf("\n");
}
__host__ void printLinearVector(size_t* x, size_t num_rows, size_t num_cols)
{
for(int i = 0 ; i < num_rows ; i++ )
{
hipLaunchKernelGGL(( printLinearVector_GPU), dim3(1),dim3(1), 0, 0, x, i, num_rows, num_cols);
hipDeviceSynchronize();
}
}
__host__ void printLinearVector(double* x, size_t num_rows, size_t num_cols)
{
for(int i = 0 ; i < num_rows ; i++ )
{
hipLaunchKernelGGL(( printLinearVector_GPU), dim3(1),dim3(1), 0, 0, x, i, num_rows, num_cols);
hipDeviceSynchronize();
}
}
__global__ void print_GPU_(double* x, size_t i)
{
printf("%d %g\n", i, x[i]);
}
__host__ void printVector(double* x, size_t num_rows)
{
for ( int i = 0 ; i < num_rows ; i++ )
hipLaunchKernelGGL(( print_GPU_), dim3(1),dim3(1), 0, 0, x, i );
}
__global__
void printVector_GPU(double* x)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
printf("[GPU] x[%d] = %e\n", id, x[id]);
}
__global__
void printVector_GPU(double* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
printf("%d %e\n", id, x[id]);
}
__global__
void printVector_GPU(std::size_t* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
printf("%d %lu\n", id, x[id]);
}
__global__
void printVector_GPU(int* x)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
printf("[GPU] x[%d] = %d\n", id, x[id]);
}
__global__
void printELL_GPU(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int i = 0 ; i < num_rows ; i++)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%f ", valueAt(i, j, value, index, max_row_size) );
printf("\n");
}
}
__global__
void printELL_GPU_(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int i = 0 ; i < num_rows ; i++)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%g ", valueAt_(i, j, value, index, max_row_size, num_rows) );
printf("\n");
}
}
__global__
void printELLrow_GPU(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%.3f ", valueAt(row, j, value, index, max_row_size) );
printf("\n");
}
__host__
void printELLrow(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( size_t i = 0 ; i < num_rows ; i++ )
{
hipLaunchKernelGGL(( printELLrow_GPU), dim3(1),dim3(1), 0, 0, i, value, index, max_row_size, num_rows, num_cols);
hipDeviceSynchronize();
}
}
// prints matrix with size (num_rows, num_cols) that is stored in a transposed ELLPACK format
__global__
void printELLrow_GPU_(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%.3f ", valueAt_(row, j, value, index, max_row_size, num_rows) );
printf("\n");
}
__host__
void printELLrow_(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( size_t i = 0 ; i < num_rows ; i++ )
{
hipLaunchKernelGGL(( printELLrow_GPU_), dim3(1),dim3(1), 0, 0, i, value, index, max_row_size, num_rows, num_cols);
hipDeviceSynchronize();
}
}
// (scalar) a = b
__global__
void equals_GPU(double* a, double* b)
{
*a = *b;
}
// x = a . b (dot product)
__global__
void dotProduct_GPU(double* x, double* a, double* b, size_t num_rows)
{
unsigned int id = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
double temp = 0.0;
// filling in the shared variable
while(id < num_rows){
temp += a[id]*b[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
#if __CUDA_ARCH__ < 600
atomicAdd_double(x, cache[0]);
#else
atomicAdd(x, cache[0]);
#endif
}
__syncthreads();
}
__global__
void LastBlockDotProduct(double* dot, double* x, double* y, size_t starting_index)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + starting_index;
#if __CUDA_ARCH__ < 600
atomicAdd_double(dot, x[id]*y[id]);
#else
atomicAdd(dot, x[id]*y[id]);
#endif
}
// dot = a[] * b[]
__host__
void dotProduct(double* dot, double* a, double* b, size_t N, dim3 gridDim, dim3 blockDim)
{
hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, dot, 1 );
// getting the last block's size
size_t lastBlockSize = blockDim.x - ( (gridDim.x * blockDim.x ) - N );
if ( N < blockDim.x)
{
hipLaunchKernelGGL(( LastBlockDotProduct), dim3(1), dim3(N), 0, 0, dot, a, b, 0 );
}
else
{
// dot products for the full blocks
hipLaunchKernelGGL(( dotProduct_GPU), dim3(gridDim.x - 1), dim3(blockDim), 0, 0, dot, a, b, (gridDim.x - 1)*blockDim.x );
// dot products for the last incomplete block
hipLaunchKernelGGL(( LastBlockDotProduct), dim3(1), dim3(lastBlockSize), 0, 0, dot, a, b, ( (gridDim.x - 1) * blockDim.x ) );
}
}
// x = y / z
__global__
void divide_GPU(double *x, double *y, double *z)
{
*x = *y / *z;
}
// x += y
__global__ void add_GPU(double *x, double *y)
{
*x += *y;
}
// x -= y
__global__ void minus_GPU(double *x, double *y)
{
*x -= *y;
}
// x += c
__global__
void addVector_GPU(double *x, double *c, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
x[id] += c[id];
}
// a = b
__global__
void vectorEquals_GPU(double* a, double* b, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = b[id];
}
////////////////////////////////////////////
// ASSEMBLER
////////////////////////////////////////////
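// returns, for each multigrid level, the global DOF indices that carry a
// Dirichlet boundary condition for the chosen boundary-condition case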
__host__
vector<vector<size_t>> applyBC(vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim)
{
vector<vector<size_t>> bc_index(numLevels);
vector<size_t> nodesPerDim;
for( int i = 0 ; i < N.size() ; i++ )
nodesPerDim.push_back(N[i]+1);
if ( bc_case == 0 )
{
// base level
size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
for ( int i = 0 ; i < nodesPerDim[1] ; i++ )
{
bc_index[0].push_back(i*nodesPerDim[0]*dim);
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[0].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j);
}
}
// y-direction boundary condition at bottom right node
bc_index[0].push_back(dim*N[0] + 1 );
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[0].push_back(dim*N[0] + 1 + totalNodes2D*3*j);
}
// finer levels
for ( int lev = 1 ; lev < numLevels ; lev++ )
{
for( int i = 0 ; i < N.size() ; i++ )
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
for ( int i = 0 ; i < nodesPerDim[1] ; i++ )
{
bc_index[lev].push_back(i*nodesPerDim[0]*dim);
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[lev].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j);
}
}
// y-direction boundary condition at bottom right node
bc_index[lev].push_back(nodesPerDim[0]*dim - (dim-1));
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[lev].push_back(dim*nodesPerDim[0] - (dim-1) + totalNodes2D*3*j);
}
}
}
else if ( bc_case == 1 )
{
if ( N.size() < 3 )
throw(runtime_error("Error : Boundary condition case 1 is not set up yet for 2D"));
// base level
size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
// plane where u2 = 0
for ( int i = 0 ; i < totalNodes2D ; i++ )
bc_index[0].push_back(i*dim + 2);
// 2 points with pinned BC
bc_index[0].push_back( totalNodes2D*3*N[2] );
bc_index[0].push_back( totalNodes2D*3*N[2] + 1 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 2 );
bc_index[0].push_back( totalNodes2D*3*N[2] + (N[0]+1) * (N[1]) * 3 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 1 + (N[0]+1) * (N[1]) * 3 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 2 + (N[0]+1) * (N[1]) * 3 );
// finer levels
for ( int lev = 1 ; lev < numLevels ; lev++ )
{
for( int i = 0 ; i < N.size() ; i++ )
{
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
N[i] *= 2;
}
totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
// plane where u2 = 0
for ( int i = 0 ; i < totalNodes2D ; i++ )
bc_index[lev].push_back(i*dim + 2);
// 2 points with pinned BC
bc_index[lev].push_back( totalNodes2D*3*N[2] );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 1 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 2 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + (N[0]+1) * (N[1]) * 3 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 1 + (N[0]+1) * (N[1]) * 3 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 2 + (N[0]+1) * (N[1]) * 3 );
}
}
return bc_index;
}
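// Usage sketch (illustrative only; the mesh sizes are assumed): boundary DOF indices
// for a two-level 2D problem with a 4x3 base mesh and bc_case 0:
//
//   vector<size_t> N = {4, 3};
//   vector<vector<size_t>> bc_index = applyBC(N, 2, 0, 2);
//   // bc_index[0] holds the fixed DOFs of the base grid, bc_index[1] those of the finer grid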
__host__
void applyLoad(vector<double> &b, vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim, double force)
{
if ( bc_case == 0 )
{
vector<size_t> nodesPerDim;
for ( int i = 0 ; i < N.size() ; i++)
nodesPerDim.push_back(N[i]+1);
size_t index = 0;
for ( int lev = 0 ; lev < numLevels - 1 ; lev++)
{
for ( int i = 0 ; i < N.size() ; i++)
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
}
index = dim * nodesPerDim[0] * ( nodesPerDim[1] - 1 ) + 1;
b[index] = force;
if ( dim == 3 )
{
for ( int i = 1 ; i < nodesPerDim[2] ; i++ )
{
index = index + (nodesPerDim[0]*nodesPerDim[1])*dim;
b[index] = force;
}
}
}
else if ( bc_case == 1 )
{
if ( N.size() < 3 )
throw(runtime_error("Error : Load case 1 is not set up yet for 2D"));
// obtaining the finest grid's number of elements on the x-axis
size_t Nx_fine = N[0];
for ( int lev = 0 ; lev < numLevels - 1 ; lev++)
Nx_fine *= 2;
size_t index = (Nx_fine+1)*dim - 2;
b[index] = force;
}
}
// adds local stiffness matrix of an element to the global stiffness matrix
__global__
void assembleGlobalStiffness_GPU(
size_t numElements, // total number of elements
size_t dim, // dimension
double* chi, // the updated design variable value of each element
double* A_local, // local stiffness matrix
size_t num_rows_l, // local stiffness matrix's number of rows
double* value, // global element's ELLPACK value vector
size_t* index, // global element's ELLPACK index vector
size_t max_row_size, // global element's ELLPACK maximum row size
size_t num_rows, // global element's ELLPACK number of rows
size_t* node_index, // vector that contains the corresponding global indices of the node's local indices
size_t p
)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < numElements )
{
int numNodesPerElement = pow(2,dim);
for ( int row = 0 ; row < num_rows_l ; row++ )
{
int y = dim*node_index[ (row/dim) + (id*numNodesPerElement) ] + ( row % dim );
for ( int col = 0 ; col < num_rows_l ; col++ )
{
int x = dim*node_index[ (col/dim) + (id*numNodesPerElement) ] + ( col % dim );
atomicAddAt( x, y, value, index, max_row_size, num_rows, pow(chi[id],p)*A_local[ ( col + row*num_rows_l ) ] );
}
}
}
}
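// Launch sketch (illustrative only; one thread per element, the device pointers and
// host variables named below are assumed):
//
//   dim3 gridDim, blockDim;
//   calculateDimensions(numElements, gridDim, blockDim);
//   hipLaunchKernelGGL(( assembleGlobalStiffness_GPU), dim3(gridDim), dim3(blockDim), 0, 0,
//                       numElements, dim, d_chi, d_A_local, num_rows_l,
//                       d_value, d_index, max_row_size, num_rows, d_node_index, p);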
// applies boundary conditions on the global stiffness matrix (2d case): the affected row/column is set to '0' and the diagonal entry to '1'
__global__
void applyMatrixBC2D_GPU(double* value, size_t* index, size_t max_row_size, size_t* bc_index, size_t num_rows, size_t bc_size, size_t Nx, size_t Ny, size_t dim)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < bc_size )
{
// assigning each thread to a single bc index
size_t bc = bc_index[id];
// setting the row entries to '0'
for ( int i = 0 ; i < max_row_size ; i++ )
{
value[ bc + i*num_rows ] = 0.0;
}
// setting the diagonal to '1'
// setting the column entries to '0' through the neighbouring nodes
int base_id = (bc - bc%dim);
bool south = ( bc >= (Nx + 1)*dim );
bool north = ( bc < (Nx+1)*(Ny)*dim );
bool west = ( (bc) % ((Nx + 1)*dim) >= dim );
bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 );
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// origin
{
// setting the diagonal to '1'
setAt( bc, bc, value, index, max_row_size, num_rows, 1.0 );
// and other DOFs on the node to '0'
for ( int i = 1 ; i < dim ; i++)
setAt( bc, bc + i, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( base_id == 0 || east )
{
for ( int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( base_id == 0 || (north && east) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
}
}
// applies boundary conditions on the global stiffness matrix (3d case): the affected row/column is set to '0' and the diagonal entry to '1'
__global__
void applyMatrixBC3D_GPU(double* value, size_t* index, size_t max_row_size, size_t* bc_index, size_t num_rows, size_t bc_size, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < bc_size )
{
// assigning each thread to a single bc index
size_t bc = bc_index[id];
// setting the row entries to '0'
for ( int i = 0 ; i < max_row_size ; i++ )
{
value[ bc + i*num_rows ] = 0.0;
}
// setting the column entries to '0' through the neighbouring nodes
size_t base_id = (bc - bc%dim);
size_t gridsize_2D = (Nx+1)*(Ny+1)*dim;
bool prev_layer = (bc >= (Nx+1)*(Ny+1)*dim);
bool next_layer = (bc < (Nx+1)*(Ny+1)*(Nz)*dim);
bool south = ((bc) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim);
bool north = ((bc) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim);
bool west = ((bc) % ((Nx + 1)*dim) >= dim);
bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0);
//// previous layer
// south-west
if ( prev_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( prev_layer && south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( prev_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( prev_layer && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// origin
if ( prev_layer )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( prev_layer && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( prev_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( prev_layer && north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( prev_layer && north && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
//// current layer
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// origin
{
// setting the diagonal to '1'
setAt( bc, bc, value, index, max_row_size, num_rows, 1.0 );
// and other DOFs on the node to '0'
for ( int i = 1 ; i < dim ; i++)
setAt( bc, bc + i, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( base_id == 0 || east )
{
for ( int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( base_id == 0 || (north && east ) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
//// next layer
// south-west
if ( next_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( next_layer && south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( next_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( next_layer && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// origin
if ( next_layer )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( next_layer && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( next_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( next_layer && north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( next_layer && north && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
}
}
// applies boundary conditions on the prolongation matrix: entries in the boundary-condition columns are set to '0' (existing '1' entries are left untouched)
__global__
void applyProlMatrixBC_GPU( double* value, size_t* index, size_t max_row_size,
size_t* bc_index, size_t* bc_index_,
size_t num_rows, size_t num_rows_,
size_t bc_size, size_t bc_size_)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
for ( int row = 0 ; row < max_row_size ; row++ )
{
for ( int i = 0 ; i < bc_size_ ; i++ )
{
size_t bc_row = bc_index_[i];
if ( value[ id + row*num_rows ] != 1 && index[id + row*num_rows] == bc_row )
value[id + row*num_rows ] = 0;
}
}
}
}
// input the coarse node's "index" to obtain the node's corresponding fine node index
__device__
size_t getFineNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
if ( dim == 3 )
{
size_t twoDimSize = (Nx+1)*(Ny+1);
size_t baseindex = index % twoDimSize;
size_t base_idx = baseindex % (Nx+1);
size_t fine2Dsize = (2*Nx+1)*(2*Ny+1);
size_t multiplier = index/twoDimSize;
return 2*base_idx + (baseindex/(Nx+1))*2*(2*Nx + 1) + 2*fine2Dsize*multiplier;
}
else
return (2 * (index / (Nx + 1)) * (2*Nx + 1) + 2*( index % (Nx+1)) );
}
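// Worked example (2D, dim = 2): on a coarse grid with Nx = Ny = 1 (2x2 nodes),
// coarse node 3 (top-right corner) maps to fine node
// 2*(3/2)*(2*1+1) + 2*(3%2) = 6 + 2 = 8, i.e. the top-right corner of the
// refined 3x3 node grid.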
////////////////////////////////////////////
// SMOOTHERS
////////////////////////////////////////////
__global__ void Jacobi_Precond_GPU(double* c, double* value, size_t* index, size_t max_row_size, double* r, size_t num_rows, double damp){
int id = blockDim.x * blockIdx.x + threadIdx.x;
// B = damp / diag(A);
if ( id < num_rows )
c[id] = r[id] * damp / valueAt_(id, id, value, index, max_row_size, num_rows);
}
////////////////////////////////////////////
// SOLVER
////////////////////////////////////////////
__global__
void checkIterationConditions(bool* foo, size_t* step, double* res, double* res0, double* m_minRes, double* m_minRed, size_t m_maxIter)
{
if ( *res > *m_minRes && *res > *m_minRed*(*res0) && (*step) <= m_maxIter )
{
*foo = true;
}
else
*foo = false;
}
__global__
void checkIterationConditionsBS(bool* foo, size_t* step, size_t m_maxIter, double* res, double* m_minRes)
{
if ( *res > 1e-12 && (*step) <= m_maxIter )
{
*foo = true;
}
else
*foo = false;
}
__global__
void printInitialResult_GPU(double* res0, double* m_minRes, double* m_minRed)
{
printf(" 0 %e %9.3e ----- -------- %9.3e \n", *res0, *m_minRes, *m_minRed);
}
/// r = b - A*x
__global__
void ComputeResiduum_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r,
double* b)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
// sum += value[offset] * x[ index[offset] ];
sum += value[offset] * __ldg( &x[ index[offset] ] );
}
r[id] = b[id] - sum;
}
}
/// r = r - A*x
__global__
void UpdateResiduum_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
sum += value[offset] * __ldg( &x[ index[offset] ] );
}
r[id] = r[id] - sum;
}
}
// Ax = r for transposed ELLPACK format
__global__ void Apply_GPU (
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
sum += value[offset] * x[ index[offset] ];
}
r[id] = sum;
}
}
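// Storage note (illustrative example): in the transposed ELLPACK layout used here,
// the k-th stored entry of row 'id' sits at value[id + k*num_rows] with its column
// in index[id + k*num_rows], and padding slots point to column num_rows.
// For num_rows = 3 and max_row_size = 2, the matrix
//   [ 4 1 0 ]
//   [ 0 5 0 ]
//   [ 2 0 6 ]
// is stored as
//   value = { 4, 5, 2,   1, 0, 6 }
//   index = { 0, 1, 0,   1, 3, 2 }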
/// r = A^T * x for transposed ELLPACK format
/// NOTE: This kernel should be run with A's number of rows as the number of threads
__global__
void ApplyTransposed_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value, // A's ELL value array
const std::size_t* index, // A's ELL index array
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
for ( int n = 0; n < max_row_size; n++ )
{
int col = index [ id + n*num_rows ];
double val = value [ id + n*num_rows ];
#if __CUDA_ARCH__ < 600
atomicAdd_double( &r[col], val*x[id] );
#else
atomicAdd( &r[col], val*x[id] );
#endif
}
}
}
// outputs result in the terminal
__global__
void printResult_GPU(size_t* step, double* res, double* m_minRes, double* lastRes, double* res0, double* m_minRed)
{
if(*step < 10)
printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed);
else
printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed);
}
// increases the iteration step
__global__ void addStep(size_t* step){
++(*step);
}
// p = z + p * (rho / rho_old);
__global__
void calculateDirectionVector(
size_t* d_step,
double* d_p,
double* d_z,
double* d_rho,
double* d_rho_old,
size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
if(*d_step == 1)
{
d_p[id] = d_z[id];
}
else
{
// p *= (rho / rho_old)
d_p[id] = d_p[id] * ( *d_rho / (*d_rho_old) );
// p += z;
d_p[id] = d_p[id] + d_z[id];
}
}
}
// d_alpha = *d_rho / ( d_p * d_z )
__host__
void calculateAlpha(
double* d_alpha,
double* d_rho,
double* d_p,
double* d_z,
double* d_alpha_temp,
size_t num_rows,
dim3 gridDim,
dim3 blockDim)
{
hipLaunchKernelGGL(( setToZero), dim3(1),dim3(1), 0, 0, d_alpha_temp, 1);
// alpha_temp = ( p * z )
dotProduct(d_alpha_temp, d_p, d_z, num_rows, gridDim, blockDim);
// d_alpha = *d_rho / (*alpha_temp)
hipLaunchKernelGGL(( divide_GPU), dim3(1),dim3(1), 0, 0, d_alpha, d_rho, d_alpha_temp);
}
// x = x + alpha * p
__global__
void axpy_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
d_x[id] += (*d_alpha * d_p[id]);
}
// x = x - alpha * p
__global__
void axpy_neg_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
d_x[id] = d_x[id] - (*d_alpha * d_p[id]);
}
//// TDO
// calculates the driving force of all elements
// one thread computes one element
// df[] = 0.5 * p * pow(chi[], p-1) / local_volume * u[]^T * A * u[]
__global__
void calcDrivingForce( double *df, // driving force
double *u, // displacement vector
double* chi, // design variable
double p, // penalization parameter
size_t* node_index, // node index array
double* d_A_local, // local stiffness matrix
size_t num_rows, // num_rows of local stiffness matrix
size_t dim, // dimension
double local_volume,
size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < numElements)
{
double temp[24];
size_t numNodesPerElement = pow(2,dim);
df[id] = 0;
for ( int n = 0; n < num_rows; n++ )
{
temp[n]=0;
for ( int m = 0; m < num_rows; m++)
{
// converts local node to global node
int global_col = ( node_index [ (m / dim) + id*numNodesPerElement ] * dim ) + ( m % dim );
temp[n] += u[global_col] * d_A_local[ n + m*num_rows ];
}
}
for ( int n = 0; n < num_rows; n++ )
{
int global_col = ( node_index [ (n / dim) + id*numNodesPerElement ] * dim ) + ( n % dim );
df[id] += temp[n] * u[global_col];
}
df[id] *= 0.5 * p * pow(chi[id], p-1) / local_volume;
}
}
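// Launch sketch (illustrative only; one thread per element, the device pointers and
// the local-stiffness row count num_rows_l named below are assumed):
//
//   dim3 gridDim, blockDim;
//   calculateDimensions(numElements, gridDim, blockDim);
//   hipLaunchKernelGGL(( calcDrivingForce), dim3(gridDim), dim3(blockDim), 0, 0,
//                       d_df, d_u, d_chi, p, d_node_index, d_A_local,
//                       num_rows_l, dim, local_volume, numElements);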
// sum = sum(x)
// n = size of x vector
__global__
void sumOfVector_GPU(double* sum, double* x, size_t n)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < n)
{
temp += x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
// laplacian for both 2d and 3d cases
// for 2d, Nz has to be predefined to '1'
__device__
double laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz, double h )
{
bool east = ( (ind + 1) % Nx != 0 );
bool north = ( ind + Nx < Nx*Ny );
bool west = ( ind % Nx != 0 );
bool south = ( ind >= Nx );
bool previous_layer = (ind >= Nx*Ny);
bool next_layer = (ind < Nx*Ny*(Nz-1));
double value = -4.0 * array[ind];
// east element
if ( east )
value += 1.0 * array[ind + 1];
else
value += 1.0 * array[ind];
// north element
if ( north )
value += 1.0 * array[ind + Nx];
else
value += 1.0 * array[ind];
// west element
if ( west )
value += 1.0 * array[ind - 1];
else
value += 1.0 * array[ind];
// south element
if ( south )
value += 1.0 * array[ind - Nx];
else
value += 1.0 * array[ind];
// if 3D (with the 2D convention Nz = 1, the two extra contributions below cancel out)
if (Nz > 0)
{
value -= 2.0 * array[ind];
// previous layer's element
if ( previous_layer )
value += 1.0 * array[ind - (Nx*Ny)];
else
value += 1.0 * array[ind];
if ( next_layer )
value += 1.0 * array[ind + (Nx*Ny)];
else
value += 1.0 * array[ind];
}
return value/(h*h);
}
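// In the interior of a 2D grid this evaluates the standard 5-point stencil,
//   ( chi_E + chi_N + chi_W + chi_S - 4*chi_C ) / h^2,
// and the corresponding 7-point stencil in 3D; at the boundary the missing
// neighbour is replaced by the centre value, which acts like a zero-Neumann condition.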
__global__
void calcLambdaUpper(double *df_array, double *max, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[1024];
*max = -1.0e9;
*mutex = 0;
double temp = -1.0e9;
while(index + offset < numElements){
temp = fmax(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index + offset, Nx, Ny, Nz, h ) ) ) );
// temp = fmaxf(temp, ( df_array[index + offset] + *eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmax(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmax(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
__global__
void calcLambdaLower(double *df_array, double *min, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[1024];
*min = 1.0e9;
*mutex = 0;
double temp = 1.0e9;
if ( index < numElements )
{
while(index + offset < numElements){
temp = fmin(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index + offset, Nx, Ny, Nz, h ) ) ) );
// temp = fminf(temp, ( df_array[index + offset] - *eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmin(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*min = fmin(*min, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
}
__global__
void calcChiTrial(
double *chi,
double *df,
double *lambda_trial,
double del_t,
double* eta,
double* beta,
double* chi_trial,
size_t Nx,
size_t Ny,
size_t Nz,
size_t numElements,
double h
)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < numElements )
{
double del_chi;
del_chi = ( del_t / *eta ) * ( df[id] - *lambda_trial + (*beta)*( laplacian_GPU( chi, id, Nx, Ny, Nz, h ) ) );
if ( del_chi + chi[id] > 1 )
chi_trial[id] = 1;
else if ( del_chi + chi[id] < 1e-9 )
chi_trial[id] = 1e-9;
else
chi_trial[id] = del_chi + chi[id];
}
}
__global__
void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial)
{
if ( *rho_trial > rho )
*lambda_l = *lambda_trial;
else
*lambda_u = *lambda_trial;
*lambda_trial = 0.5 * ( *lambda_l + *lambda_u );
}
__global__ void calcRhoTrial(double* rho_tr, double local_volume, size_t numElements)
{
double total_volume = local_volume * numElements;
*rho_tr *= local_volume;
*rho_tr /= total_volume;
}
// calculate the average weighted driving force, p_w
// NOTE: this kernel indexes its shared arrays with the global thread id and sums serially in threads 0 and 1,
// so it assumes a single block (numElements <= 1024); calcP_w_() further below uses grid-stride reductions instead
__global__
void calcP_w_GPU(double* p_w, double* df, double* uTAu, double* chi, int p, double local_volume, size_t numElements)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double int_g_p[1024];
__shared__ double int_g[1024];
if( id < numElements)
{
df[id] = uTAu[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1);
int_g_p[id] = (chi[id] - 1e-9)*(1-chi[id]) * df[id] * local_volume;
int_g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
__syncthreads();
if ( id == 0 )
{
for ( int i = 1 ; i < numElements ; ++i )
int_g_p[0] += int_g_p[i];
}
if ( id == 1 )
{
for ( int i = 1 ; i < numElements ; ++i )
int_g[0] += int_g[i];
}
__syncthreads();
if ( id == 0 )
*p_w = int_g_p[0] / int_g[0];
}
}
__global__
void calc_g_GPU(double*g, double* chi, size_t numElements, double local_volume)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if (id < numElements)
{
g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
// if ( id == 0 )
// printf("%f\n", g[id]);
}
}
__global__
void calc_g_GPU_(double* sum, double* chi, size_t numElements, double local_volume)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < numElements )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
// sum = sum ( df * g * local_volume)
__global__
void calcSum_df_g_GPU(double* sum, double* df, double* g, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += df[id]*g[id]; // local volume is already included in g, i.e. g = g*local_volume
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__host__
void calcP_w(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, double* g, double* df_g, size_t numElements, double local_volume)
{
dim3 gridDim;
dim3 blockDim;
calculateDimensions(numElements, gridDim, blockDim);
// calculate g of each element * local_volume
hipLaunchKernelGGL(( calc_g_GPU), dim3(gridDim), dim3(blockDim), 0, 0, g, chi, numElements, local_volume);
// calculate sum_g = sum(g)
hipLaunchKernelGGL(( sumOfVector_GPU), dim3(gridDim), dim3(blockDim), 0, 0, sum_g, g, numElements);
// sum_df_g = sum( g[i]*df[i]*local_volume )
hipLaunchKernelGGL(( calcSum_df_g_GPU), dim3(gridDim), dim3(blockDim), 0, 0, sum_df_g, df, g, numElements);
// p_w = sum_df_g / sum_g
hipLaunchKernelGGL(( divide_GPU), dim3(1),dim3(1), 0, 0, p_w, sum_df_g, sum_g);
}
// sum = sum ( df * g * local_volume)
__global__
void calcSum_df_g_GPU_(double* sum, double* df, double* chi, size_t numElements, double local_volume)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += df[id]* ( (chi[id] - 1e-9)*(1-chi[id]) * local_volume );
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__host__
void calcP_w_(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, size_t numElements, double local_volume)
{
dim3 gridDim;
dim3 blockDim;
calculateDimensions(numElements, gridDim, blockDim);
// calculate g of each element * local_volume
// calculate sum_g = sum(g)
hipLaunchKernelGGL(( calc_g_GPU_), dim3(gridDim), dim3(blockDim), 0, 0, sum_g, chi, numElements, local_volume);
// sum_df_g = sum( g[i]*df[i]*local_volume )
hipLaunchKernelGGL(( calcSum_df_g_GPU_), dim3(gridDim), dim3(blockDim), 0, 0, sum_df_g, df, chi, numElements, local_volume);
// p_w = sum_df_g / sum_g
hipLaunchKernelGGL(( divide_GPU), dim3(1),dim3(1), 0, 0, p_w, sum_df_g, sum_g);
}
// two threads to calculate eta and beta
__global__ void calcEtaBeta( double* eta, double* beta, double etastar, double betastar, double* p_w )
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id == 0 )
*eta = etastar * (*p_w);
if ( id == 1 )
*beta = betastar * (*p_w);
}
// convergence check for the bisection algorithm in the density update process
__global__ void checkTDOConvergence(bool* foo, double rho, double* rho_trial)
{
if ( abs(rho - *rho_trial) < 1e-7 )
*foo = false;
else
*foo = true;
}
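// Sketch of the bisection loop in the density update (illustrative only; the host
// variables rho, del_t, h, inner_foo and all device pointers named below are assumed):
//
//   hipLaunchKernelGGL(( setToTrue), dim3(1), dim3(1), 0, 0, d_inner_foo);
//   while ( inner_foo )   // copied back to the host after each pass
//   {
//       hipLaunchKernelGGL(( calcChiTrial), dim3(gridDim), dim3(blockDim), 0, 0,
//                           d_chi, d_df, d_lambda_trial, del_t, d_eta, d_beta,
//                           d_chi_trial, Nx, Ny, Nz, numElements, h);
//       hipLaunchKernelGGL(( setToZero), dim3(1), dim3(1), 0, 0, d_rho_trial, 1);
//       hipLaunchKernelGGL(( sumOfVector_GPU), dim3(gridDim), dim3(blockDim), 0, 0, d_rho_trial, d_chi_trial, numElements);
//       hipLaunchKernelGGL(( calcRhoTrial), dim3(1), dim3(1), 0, 0, d_rho_trial, local_volume, numElements);
//       hipLaunchKernelGGL(( calcLambdaTrial), dim3(1), dim3(1), 0, 0, d_rho_trial, rho, d_lambda_l, d_lambda_u, d_lambda_trial);
//       hipLaunchKernelGGL(( checkTDOConvergence), dim3(1), dim3(1), 0, 0, d_inner_foo, rho, d_rho_trial);
//       hipMemcpy(&inner_foo, d_inner_foo, sizeof(bool), hipMemcpyDeviceToHost);
//   }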
// computes and fills in the global stiffness matrix's ELL index array for 2d case
__global__ void fillIndexVector2D_GPU(size_t* index, size_t Nx, size_t Ny, size_t max_row_size, size_t num_rows)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
int counter = 0;
int dim = 2;
if ( id < num_rows )
{
int base_id = (id - id%dim);
// south-west
if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i;
counter++;
}
}
// south
if ( id >= (Nx + 1)*dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i;
counter++;
}
}
// south-east
if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i;
counter++;
}
}
// west
if ( (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i;
counter++;
}
}
// origin
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i;
counter++;
}
// east
if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i;
counter++;
}
}
// north-west
if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i;
counter++;
}
}
// north
if ( id < (Nx+1)*(Ny)*dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i;
counter++;
}
}
// north-east
if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i;
counter++;
}
}
for ( int i = counter ; i < max_row_size; i++)
{
index[id + i*num_rows] = num_rows;
}
}
}
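// Note: in 2D each DOF couples with at most 9 nodes (itself plus 8 neighbours),
// i.e. 9*dim = 18 entries per row, so the ELL arrays are expected to be allocated
// with max_row_size >= 18 (27 nodes, i.e. 81 entries, for the 3D kernel below).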
// computes and fills in the global stiffness matrix's ELL index array for 3d case
__global__ void fillIndexVector3D_GPU(size_t* index, size_t Nx, size_t Ny, size_t Nz, size_t max_row_size, size_t num_rows)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
int counter = 0;
int dim = 3;
if ( id < num_rows )
{
size_t base_id = (id - id%dim);
size_t gridsize_2D = (Nx+1)*(Ny+1)*dim;
// boolean variables that returns true if the neighbouring node exists
bool prev_layer = (id >= (Nx+1)*(Ny+1)*dim);
bool next_layer = (id < (Nx+1)*(Ny+1)*(Nz)*dim);
bool south = ((id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim);
bool north = ((id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim);
bool west = ((id) % ((Nx + 1)*dim) >= dim);
bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0);
//// previous layer
// south-west
if ( prev_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i - gridsize_2D;
counter++;
}
}
// south
if ( prev_layer && south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i - gridsize_2D;
counter++;
}
}
// south-east
if ( prev_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i - gridsize_2D;
counter++;
}
}
// west
if ( prev_layer && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i - gridsize_2D;
counter++;
}
}
// origin
if ( prev_layer )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i - gridsize_2D;
counter++;
}
}
// east
if ( prev_layer && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i - gridsize_2D;
counter++;
}
}
// north-west
if ( prev_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i - gridsize_2D;
counter++;
}
}
// north
if ( prev_layer && north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i - gridsize_2D;
counter++;
}
}
// north-east
if ( prev_layer && north && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i - gridsize_2D;
counter++;
}
}
//// current layer
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i;
counter++;
}
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i;
counter++;
}
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i;
counter++;
}
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i;
counter++;
}
}
// origin
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i;
counter++;
}
// east
if ( base_id == 0 || east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i;
counter++;
}
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i;
counter++;
}
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i;
counter++;
}
}
// north-east
if ( base_id == 0 || (north && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i;
counter++;
}
}
//// next layer
// south-west
if ( next_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i + gridsize_2D;
counter++;
}
}
// south
if ( next_layer && south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i + gridsize_2D;
counter++;
}
}
// south-east
if ( next_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i + gridsize_2D;
counter++;
}
}
// west
if ( next_layer && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i + gridsize_2D;
counter++;
}
}
// origin
if ( next_layer )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i + gridsize_2D;
counter++;
}
}
// east
if ( base_id == 0 || ( next_layer && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i + gridsize_2D;
counter++;
}
}
// north-west
if ( next_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i + gridsize_2D;
counter++;
}
}
// north
if ( next_layer && north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i + gridsize_2D;
counter++;
}
}
// north-east
if ( base_id == 0 || (next_layer && north && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i + gridsize_2D;
counter++;
}
}
for ( int i = counter ; i < max_row_size; i++)
{
index[id + i*num_rows] = num_rows;
}
}
}
// assembles the prolongation matrix for 2d case
// the ELL value and index arrays are calculated and filled
__global__ void fillProlMatrix2D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < num_rows )
{
int counter = 0;
int dim = 2;
// coarse grid
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
size_t base_id = (id - id%dim);
size_t node_index = base_id / dim;
int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim);
// if node is even numbered
bool condition1 = (node_index % 2 == 0 );
// if node exists in the coarse grid
bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) );
bool south = ( id >= (Nx + 1)*dim );
bool west = ( (id) % ((Nx + 1)*dim) >= dim );
bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 );
bool north = ( id < (Nx+1)*(Ny)*dim );
// if there exists a coarse node in the same location
if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index )
{
p_index[id + counter*num_rows] = coarse_node_index*dim + id%dim;
p_value[id + counter*num_rows] = 1;
counter++;
}
else
{
// south-west
if ( south && condition1 && !condition2 && west )
{
size_t south_west_fine_node = (node_index - (Nx+1) - 1);
size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// south
if ( south && !condition1 && !condition2 )
{
size_t south_fine_node = (node_index - (Nx+1) );
size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// south-east
if ( south && condition1 && !condition2 && east )
{
size_t south_east_fine_node = (node_index - (Nx+1) + 1);
size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// west
if ( west && condition2 )
{
size_t west_fine_node = (node_index - 1);
size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// east
if ( east && condition2 )
{
size_t east_fine_node = (node_index + 1);
size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north-west
if ( north && condition1 && !condition2 && west )
{
size_t north_west_fine_node = (node_index + (Nx+1) - 1);
size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// north
if ( north && !condition1 && !condition2 )
{
size_t north_fine_node = (node_index + (Nx+1) );
size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north-east
if ( north && condition1 && !condition2 && east )
{
size_t north_east_fine_node = (node_index + (Nx+1) + 1);
size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
}
// remaining entries are filled with num_cols
for ( int i = counter ; i < p_max_row_size; i++)
{
p_index[id + i*num_rows] = num_cols;
}
}
}
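// The interpolation weights follow standard bilinear coarsening: a fine node that
// coincides with a coarse node gets weight 1, a fine node on a coarse edge gets 0.5
// from each of the two adjacent coarse nodes, and a fine node in the centre of a
// coarse cell gets 0.25 from each of the four surrounding coarse nodes (in the
// trilinear 3D kernel below: 0.5 for edge midpoints, 0.25 for face centres and
// 0.125 for cell centres).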
// assembles the prolongation matrix for 3d case
// the ELL value and index arrays are calculated and filled
__global__ void fillProlMatrix3D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t Nz, size_t p_max_row_size, size_t num_rows, size_t num_cols)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < num_rows )
{
int counter = 0;
int dim = 3;
// coarse grid
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
size_t Nz_ = Nz / 2;
size_t base_id = (id - id%dim);
size_t id_2D = (id) % ((Nx+1)*(Ny+1)*dim);
size_t node_index = base_id / dim;
int coarse_node_index = getCoarseNode3D_GPU(node_index, Nx, Ny, Nz);
size_t numNodes2D = (Nx+1)*(Ny+1);
// if node is even numbered
bool condition1 = ( node_index % 2 == 0 );
bool condition5 = ( (id_2D/dim) % ((Nx+1)*2) < (Nx+1) );
bool condition6 = ( node_index % (numNodes2D*2) < (Nx+1)*(Ny+1) );
// if there exists a coarse node in the same location
if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, Nz_, dim) == node_index )
{
p_index[id + counter*num_rows] = coarse_node_index*dim + id%dim;
p_value[id + counter*num_rows] = 1;
counter++;
}
// diagonals
else if ( !condition1 && !condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-south-west
fine_node = (node_index - numNodes2D - (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-south-east
fine_node = (node_index - numNodes2D - (Nx+1) + 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-north-west
fine_node = (node_index - numNodes2D + (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-north-east
fine_node = (node_index - numNodes2D + (Nx+1) + 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-south-west
fine_node = (node_index + numNodes2D - (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-south-east
fine_node = (node_index + numNodes2D - (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-north-west
fine_node = (node_index + numNodes2D + (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-north-east
fine_node = (node_index + numNodes2D + (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
}
// diagonals on x-z plane
else if ( condition1 && condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-west
fine_node = (node_index - (Nx+1)*(Ny+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// previous-east
fine_node = (node_index - (Nx+1)*(Ny+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-west
fine_node = (node_index + (Nx+1)*(Ny+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-east
fine_node = (node_index + (Nx+1)*(Ny+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// diagonals in x-y plane
else if ( condition1 && !condition5 && condition6 )
{
size_t fine_node;
size_t coarse_node;
// south-west
fine_node = (node_index - (Nx+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// south-east
fine_node = (node_index - (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// north-west
fine_node = (node_index + (Nx+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// north-east
fine_node = (node_index + (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// diagonals in y-z plane
else if ( condition1 && !condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-south
fine_node = (node_index - (Nx+1)*(Ny+1) - (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// previous-north
fine_node = (node_index - (Nx+1)*(Ny+1) + (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-south
fine_node = (node_index + (Nx+1)*(Ny+1) - (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-north
fine_node = (node_index + (Nx+1)*(Ny+1) + (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
else
{
// previous-origin
if ( !condition1 && condition5 && !condition6 )
{
// printf("%lu\n", node_index*dim );
size_t fine_node = (node_index - (Nx+1)*(Ny+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// next-origin
if ( !condition1 && condition5 && !condition6 )
{
// printf("%lu\n", node_index*dim );
size_t fine_node = (node_index + (Nx+1)*(Ny+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// south
if ( !condition1 && !condition5 && condition6 )
{
size_t fine_node = (node_index - (Nx+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// west
if ( !condition1 && condition5 && condition6 )
{
// printf("%lu\n", node_index*3 );
size_t fine_node = (node_index - 1);
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// east
if ( !condition1 && condition5 && condition6 )
{
size_t fine_node = (node_index + 1);
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north
if ( !condition1 && !condition5 && condition6 )
{
size_t fine_node = (node_index + (Nx+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
}
for ( int i = counter ; i < p_max_row_size; i++)
{
p_index[id + i*num_rows] = num_cols;
}
}
}
// obtaining a node's corresponding node on a coarser grid
__device__ int getCoarseNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
// get coarse grid dimensions
size_t Nx_ = Nx / 2;
// size_t Ny_ = Ny / 2;
// size_t Nz_ = Nz / 2;
// if node is even numbered
bool condition1 = (index % 2 == 0 );
// if node exists in the coarse grid
bool condition2 = ( index % ((Nx+1)*2) < (Nx + 1) );
if ( condition1 && condition2 )
{
return index/2 - (index/((Nx+1)*2 ))*(Nx_);
}
// -1 means the node in the coarse grid does not exist
else
return -1;
}
__device__ int getCoarseNode3D_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz)
{
// get coarse grid dimensions
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
// size_t Nz_ = Nz / 2;
size_t gridsize2D = (Nx+1)*(Ny+1);
size_t gridsize2D_ = (Nx_+1)*(Ny_+1);
// if node is even numbered
bool condition1 = ( index % 2 == 0 );
// if node exists in the coarse grid (x-y-plane)
bool condition2 = ( index % ((Nx+1)*2) < (Nx + 1) );
// if node exists in the coarse grid (y-z-plane)
bool condition3 = ( index % ((Nx+1)*(Ny+1)*2) < (Nx+1)*(Ny+1) );
if ( condition1 && condition2 && condition3 )
{
int base_id = index % gridsize2D;
return base_id/2 - (base_id/((Nx+1)*2 ))*(Nx_) + (index/(gridsize2D*2))*gridsize2D_;
// return index/2 - (index/((Nx+1)*2 ))*(Nx_);
}
// -1 means the node in the coarse grid does not exist
else
return -1;
}
// DEBUG: check to ensure mass is conserved during the density update process
// (indexes shared memory with the global thread id, so it assumes a single block, i.e. numElements <= 1024)
__global__ void checkMassConservation(double* chi, double local_volume, size_t numElements)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double temp[1024];
if ( id < numElements)
{
// sum of chi * local_volume
temp[id] = chi[id] * local_volume;
}
__syncthreads();
if ( id == 0 )
{
for ( int i = 1 ; i < numElements ; i++ )
{
temp[0] += temp[i];
}
// total volume
double vol = local_volume * numElements;
printf("chi_trial %f\n", temp[0] / vol);
}
}
// adds the value to a transposed ELLPack matrix A at (row,col)
__device__
void atomicAddAt( size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows, double value )
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + col] == row)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double( &vValue[k * num_rows + col] , value );
#else
atomicAdd( &vValue[k * num_rows + col] , value );
#endif
k = max_row_size; // to exit for loop
}
}
}
// A_coarse = P^T * A_fine * P
// A : fine stiffness matrix
// A_ : coarse stiffness matrix
// P : prolongation matrix
__global__ void PTAP(double* A_value, size_t* A_index, size_t max_row_size, size_t num_rows,
double* A_value_, size_t* A_index_, size_t max_row_size_, size_t num_rows_,
double* P_value, size_t* P_index, size_t p_max_row_size)
{
int k = blockDim.x * blockIdx.x + threadIdx.x;
if( k < num_rows )
{
for ( int i_ = 0 ; i_ < p_max_row_size ; i_++ )
{
size_t i = P_index[k + i_*num_rows];
double P_ki = P_value[k + i_*num_rows];
for( int l_ = 0 ; l_ < max_row_size ; l_++ )
{
size_t l = A_index[k + l_*num_rows];
double A_kl = A_value[k + l_*num_rows];
double P_ki_A_kl = P_ki * A_kl;
for( int j_ = 0 ; j_ < p_max_row_size ; j_++ )
{
size_t j = P_index[l + j_*num_rows];
double P_lj = P_value[l + j_*num_rows];
double P_ki_A_kl_P_lj = P_ki_A_kl * P_lj;
if(P_ki_A_kl_P_lj != 0.0)
atomicAddAt( j, i, A_value_, A_index_, max_row_size_, num_rows_, P_ki_A_kl_P_lj );
}
}
}
}
}
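// Launch sketch (illustrative only; one thread per fine-grid row, the device pointers
// named below are assumed, and the coarse arrays must be zero-initialised beforehand,
// e.g. with setToZero):
//
//   dim3 gridDim, blockDim;
//   calculateDimensions(num_rows, gridDim, blockDim);
//   hipLaunchKernelGGL(( PTAP), dim3(gridDim), dim3(blockDim), 0, 0,
//                       d_A_value, d_A_index, max_row_size, num_rows,
//                       d_A_value_, d_A_index_, max_row_size_, num_rows_,
//                       d_P_value, d_P_index, p_max_row_size);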
// calculation of compliance, c = 0.5 * sum( u^T * K * u )
// c is labelled as sum
__global__
void calcCompliance(double* sum, double* u, double* chi, size_t* node_index, double* d_A_local, double local_volume, size_t num_rows, size_t dim, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < numElements)
{
double uTKu = 0;
double temp[24];
size_t numNodesPerElement = pow(2,dim);
uTKu = 0;
for ( int n = 0; n < num_rows; n++ )
{
temp[n]=0;
for ( int m = 0; m < num_rows; m++)
{
// converts local node to global node
int global_col = ( node_index [ (m / dim) + id*numNodesPerElement ] * dim ) + ( m % dim );
temp[n] += u[global_col] * d_A_local[ n + m*num_rows ];
}
}
for ( int n = 0; n < num_rows; n++ )
{
int global_col = ( node_index [ (n / dim) + id*numNodesPerElement ] * dim ) + ( n % dim );
uTKu += temp[n] * u[global_col];
}
__syncthreads();
uTKu *= 0.5 * pow(chi[id],3);
// reduction
__shared__ double cache[1024];
cache[threadIdx.x] = uTKu;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
}
// computes the measure of non-discreteness (MOD)
__global__
void calcMOD(double* sum, double* chi, double local_volume, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += chi[id] * (1-chi[id]) * local_volume * 4 / ( local_volume * numElements );
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
} | 9b1e1957c91a21949a84584031aed3a31b0651a5.cu | /*
cudakernels.cu
Developed for the master thesis project: GPU-accelerated Thermodynamic Topology Optimization
Author: Wan Arif bin Wan Abhar
Institution: Ruhr Universitaet Bochum
*/
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#include <cmath>
#include <ctime>
#include <iostream>
#include "../include/cudakernels.h"
#define CUDA_CALL( call ) \
{ \
cudaError_t err = call; \
if ( cudaSuccess != err){ \
fprintf(stderr, "CUDA error for %s in %d of %s : %s.\n", #call , __LINE__ , __FILE__ ,cudaGetErrorString(err));exit(EXIT_FAILURE);}\
}
using namespace std;
// Self-defined double-precision atomicAdd function for NVIDIA GPUs with compute capability below 6.0.
// The built-in double-precision atomicAdd() is only available on GPUs with compute capability 6.0 (Pascal) or newer.
__device__
double atomicAdd_double(double* address, double val)
{
unsigned long long int* address_as_ull =
(unsigned long long int*)address;
unsigned long long int old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed,
__double_as_longlong(val +
__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
// determines 1-dimensional CUDA block and grid sizes based on the number of rows N
__host__
void calculateDimensions(size_t N, dim3 &gridDim, dim3 &blockDim)
{
if ( N <= 1024 )
{
blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1;
gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
}
else
{
blockDim.x = 1024; blockDim.y = 1; blockDim.z = 1;
gridDim.x = (int)ceil(N/blockDim.x)+1; gridDim.y = 1; gridDim.z = 1;
}
}
// determines 2-dimensional CUDA block and grid sizes based on the number of rows N
__host__ void calculateDimensions2D(size_t Nx, size_t Ny, dim3 &gridDim, dim3 &blockDim)
{
if ( Nx <= 32 && Ny <= 32)
{
blockDim.x = 32; blockDim.y = 32; blockDim.z = 1;
gridDim.x = 1; gridDim.y = 1; gridDim.z = 1;
}
else
{
blockDim.x = 32; blockDim.y = 32; blockDim.z = 1;
gridDim.x = (int)ceil(Nx/blockDim.x)+1; gridDim.y = (int)ceil(Ny/blockDim.y)+1; gridDim.z = 1;
}
}
// calculates the DOF of a grid with dimensions
__host__ size_t calcDOF(size_t Nx, size_t Ny, size_t dim)
{
return (Nx + 1) * (Ny + 1) * dim;
}
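// Example: a 2D grid with Nx = 4, Ny = 3 elements has (4+1)*(3+1) = 20 nodes,
// so calcDOF(4, 3, 2) returns 40 degrees of freedom.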
// returns value of an ELLPack matrix A at (x,y)
__device__
double valueAt(size_t x, size_t y, double* vValue, size_t* vIndex, size_t max_row_size)
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[x * max_row_size + k] == y)
return vValue[x * max_row_size + k];
}
return 0.0;
}
// returns value of a transposed ELLPack matrix A at (row,col)
__device__
double valueAt_(size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows)
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + row] == col)
return vValue[k * num_rows + row];
}
return 0.0;
}
__device__
void setAt( size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows, double value )
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + col] == row)
{
vValue[k * num_rows + col] = value;
k = max_row_size; // to exit for loop
}
}
}
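// NOTE (added illustration, not part of the original code): most matrices in this file
// use a transposed ELLPACK storage (see valueAt_ and setAt above), i.e. slot k of row r
// lives at value[k*num_rows + r] and its column number at index[k*num_rows + r]; unused
// slots point to a padding column (num_rows resp. num_cols). For example, with
// num_rows = 3 and max_row_size = 2 the matrix
//      [ 4 1 0 ]
//      [ 0 5 2 ]
//      [ 0 0 6 ]
// is stored as
//      value = { 4, 5, 6,   1, 2, 0 }
//      index = { 0, 1, 2,   1, 2, 3 }
// so that valueAt_(1, 2, value, index, 2, 3) returns 2.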
// a[] = 0.0
__global__
void setToZero(double* a, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = 0.0;
}
// a = 1
__global__
void setToOne(double* a)
{
*a = 1;
}
// norm = x.norm()
__global__
void norm_GPU(double* norm, double* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id == 0 )
*norm = 0;
__syncthreads();
if ( id < num_rows )
{
#if __CUDA_ARCH__ < 600
atomicAdd_double( norm, x[id]*x[id] );
#else
atomicAdd( norm, x[id]*x[id] );
#endif
}
__syncthreads();
if ( id == 0 )
*norm = sqrt(*norm);
}
// a[] = 0, size_t
__global__
void setToZero(size_t* a, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = 0.0;
}
// bool = true
__global__
void setToTrue( bool *foo )
{
*foo = true;
}
// x = sqrt(x)
__global__
void sqrt_GPU(double *x)
{
*x = sqrt(*x);
}
// sum = sum( x[n]*x[n] )
__global__
void sumOfSquare_GPU(double* sum, double* x, size_t n)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
double temp = 0.0;
while(id < n)
{
temp += x[id]*x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reset id
id = threadIdx.x + blockDim.x*blockIdx.x;
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__global__
void LastBlockSumOfSquare_GPU(double* sum, double* x, size_t n, size_t counter)
{
int id = threadIdx.x + blockDim.x*blockIdx.x;
if ( id >= counter*blockDim.x && id < n )
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, x[id]*x[id]);
#else
atomicAdd(sum, x[id]*x[id]);
#endif
}
}
__host__
void norm_GPU(double* d_norm, double* d_x, size_t N, dim3 gridDim, dim3 blockDim)
{
setToZero<<<1,1>>>( d_norm, 1);
// getting the last block's size
size_t lastBlockSize = N;
size_t counter = 0;
if ( N % gridDim.x == 0 ) {}
else
{
while ( lastBlockSize >= gridDim.x)
{
counter++;
lastBlockSize -= gridDim.x;
}
}
// sum of squares for the full blocks
sumOfSquare_GPU<<<gridDim.x - 1, blockDim>>>(d_norm, d_x, (gridDim.x - 1)*blockDim.x);
// sum of squares for the last incomplete block
LastBlockSumOfSquare_GPU<<<1, lastBlockSize>>>(d_norm, d_x, N, counter);
sqrt_GPU<<<1,1>>>( d_norm );
}
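// NOTE (added usage sketch, not part of the original code): computing the Euclidean norm
// of a device vector d_x of length N into the device scalar d_norm; both pointers are
// assumed to be allocated with cudaMalloc by the caller and N is assumed to exceed one
// thread block.
__host__
void norm_example( double* d_norm, double* d_x, size_t N )
{
	dim3 gridDim;
	dim3 blockDim;
	calculateDimensions(N, gridDim, blockDim);
	norm_GPU(d_norm, d_x, N, gridDim, blockDim);

	// copy the result back to the host for inspection
	double h_norm;
	CUDA_CALL( cudaMemcpy(&h_norm, d_norm, sizeof(double), cudaMemcpyDeviceToHost) );
	cout << "||x|| = " << h_norm << endl;
}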
//// DEBUG:
//// helper functions for debugging
__global__
void print_GPU(double* x)
{
printf("[GPU] x = %e\n", *x);
}
__global__
void print_GPU(int* x)
{
printf("[GPU] x = %d\n", *x);
}
__global__
void print_GPU(size_t* x)
{
printf("[GPU] x = %lu\n", *x);
}
__global__
void print_GPU(bool* x)
{
printf("[GPU] x = %d\n", *x);
}
__global__ void printLinearVector_GPU(size_t* x, size_t i, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++ )
printf("%lu ", x[j+i*num_cols]);
printf("\n");
}
__global__ void printLinearVector_GPU(double* x, size_t i, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++ )
printf("%g ", x[j+i*num_cols]);
printf("\n");
}
__host__ void printLinearVector(size_t* x, size_t num_rows, size_t num_cols)
{
for(int i = 0 ; i < num_rows ; i++ )
{
printLinearVector_GPU<<<1,1>>>(x, i, num_rows, num_cols);
cudaDeviceSynchronize();
}
}
__host__ void printLinearVector(double* x, size_t num_rows, size_t num_cols)
{
for(int i = 0 ; i < num_rows ; i++ )
{
printLinearVector_GPU<<<1,1>>>(x, i, num_rows, num_cols);
cudaDeviceSynchronize();
}
}
__global__ void print_GPU_(double* x, size_t i)
{
printf("%d %g\n", i, x[i]);
}
__host__ void printVector(double* x, size_t num_rows)
{
for ( int i = 0 ; i < num_rows ; i++ )
print_GPU_<<<1,1>>>( x, i );
}
__global__
void printVector_GPU(double* x)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
printf("[GPU] x[%d] = %e\n", id, x[id]);
}
__global__
void printVector_GPU(double* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
printf("%d %e\n", id, x[id]);
}
__global__
void printVector_GPU(std::size_t* x, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
printf("%d %lu\n", id, x[id]);
}
__global__
void printVector_GPU(int* x)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
printf("[GPU] x[%d] = %d\n", id, x[id]);
}
__global__
void printELL_GPU(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int i = 0 ; i < num_rows ; i++)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%f ", valueAt(i, j, value, index, max_row_size) );
printf("\n");
}
}
__global__
void printELL_GPU_(double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int i = 0 ; i < num_rows ; i++)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%g ", valueAt_(i, j, value, index, max_row_size, num_rows) );
printf("\n");
}
}
__global__
void printELLrow_GPU(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%.3f ", valueAt(row, j, value, index, max_row_size) );
printf("\n");
}
__host__
void printELLrow(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( size_t i = 0 ; i < num_rows ; i++ )
{
printELLrow_GPU<<<1,1>>> (i, value, index, max_row_size, num_rows, num_cols);
cudaDeviceSynchronize();
}
}
// prints matrix with size (num_rows, num_cols) that is stored in a transposed ELLPACK format
__global__
void printELLrow_GPU_(size_t row, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( int j = 0 ; j < num_cols ; j++)
printf("%.3f ", valueAt_(row, j, value, index, max_row_size, num_rows) );
printf("\n");
}
__host__
void printELLrow_(size_t lev, double* value, size_t* index, size_t max_row_size, size_t num_rows, size_t num_cols)
{
for ( size_t i = 0 ; i < num_rows ; i++ )
{
printELLrow_GPU_<<<1,1>>> (i, value, index, max_row_size, num_rows, num_cols);
cudaDeviceSynchronize();
}
}
// (scalar) a = b
__global__
void equals_GPU(double* a, double* b)
{
*a = *b;
}
// x = a * b
__global__
void dotProduct_GPU(double* x, double* a, double* b, size_t num_rows)
{
unsigned int id = threadIdx.x + blockDim.x*blockIdx.x;
unsigned int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
double temp = 0.0;
// filling in the shared variable
while(id < num_rows){
temp += a[id]*b[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
#if __CUDA_ARCH__ < 600
atomicAdd_double(x, cache[0]);
#else
atomicAdd(x, cache[0]);
#endif
}
__syncthreads();
}
__global__
void LastBlockDotProduct(double* dot, double* x, double* y, size_t starting_index)
{
int id = threadIdx.x + blockDim.x*blockIdx.x + starting_index;
#if __CUDA_ARCH__ < 600
atomicAdd_double(dot, x[id]*y[id]);
#else
atomicAdd(dot, x[id]*y[id]);
#endif
}
// dot = a[] * b[]
__host__
void dotProduct(double* dot, double* a, double* b, size_t N, dim3 gridDim, dim3 blockDim)
{
setToZero<<<1,1>>>( dot, 1 );
// getting the last block's size
size_t lastBlockSize = blockDim.x - ( (gridDim.x * blockDim.x ) - N );
if ( N < blockDim.x)
{
LastBlockDotProduct<<<1, N>>>( dot, a, b, 0 );
}
else
{
// dot products for the full blocks
dotProduct_GPU<<<gridDim.x - 1, blockDim>>>(dot, a, b, (gridDim.x - 1)*blockDim.x );
// dot products for the last incomplete block
LastBlockDotProduct<<<1, lastBlockSize>>>(dot, a, b, ( (gridDim.x - 1) * blockDim.x ) );
}
}
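// NOTE (added usage sketch, not part of the original code): computing dot = a * b for two
// device vectors of length N; d_dot, d_a and d_b are assumed to be device pointers
// allocated by the caller.
__host__
void dotProduct_example( double* d_dot, double* d_a, double* d_b, size_t N )
{
	dim3 gridDim;
	dim3 blockDim;
	calculateDimensions(N, gridDim, blockDim);
	dotProduct(d_dot, d_a, d_b, N, gridDim, blockDim);

	double h_dot;
	CUDA_CALL( cudaMemcpy(&h_dot, d_dot, sizeof(double), cudaMemcpyDeviceToHost) );
	cout << "a * b = " << h_dot << endl;
}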
// x = y / z
__global__
void divide_GPU(double *x, double *y, double *z)
{
*x = *y / *z;
}
// x += y
__global__ void add_GPU(double *x, double *y)
{
*x += *y;
}
// x -= y
__global__ void minus_GPU(double *x, double *y)
{
*x -= *y;
}
// x += c
__global__
void addVector_GPU(double *x, double *c, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
x[id] += c[id];
}
// a = b
__global__
void vectorEquals_GPU(double* a, double* b, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
a[id] = b[id];
}
////////////////////////////////////////////
// ASSEMBLER
////////////////////////////////////////////
__host__
vector<vector<size_t>> applyBC(vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim)
{
vector<vector<size_t>> bc_index(numLevels);
vector<size_t> nodesPerDim;
for( int i = 0 ; i < N.size() ; i++ )
nodesPerDim.push_back(N[i]+1);
if ( bc_case == 0 )
{
// base level
size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
for ( int i = 0 ; i < nodesPerDim[1] ; i++ )
{
bc_index[0].push_back(i*nodesPerDim[0]*dim);
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[0].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j);
}
}
// y-direction boundary condition at bottom right node
bc_index[0].push_back(dim*N[0] + 1 );
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[0].push_back(dim*N[0] + 1 + totalNodes2D*3*j);
}
// finer levels
for ( int lev = 1 ; lev < numLevels ; lev++ )
{
for( int i = 0 ; i < N.size() ; i++ )
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
for ( int i = 0 ; i < nodesPerDim[1] ; i++ )
{
bc_index[lev].push_back(i*nodesPerDim[0]*dim);
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[lev].push_back(i*nodesPerDim[0]*dim + totalNodes2D*3*j);
}
}
// y-direction boundary condition at bottom right node
bc_index[lev].push_back(nodesPerDim[0]*dim - (dim-1));
if ( dim == 3 )
{
for ( int j = 1 ; j < nodesPerDim[2] ; j++ )
bc_index[lev].push_back(dim*nodesPerDim[0] - (dim-1) + totalNodes2D*3*j);
}
}
}
else if ( bc_case == 1 )
{
if ( N.size() < 3 )
throw(runtime_error("Error : Boundary condition case 1 is not set up yet for 2D"));
// base level
size_t totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
// plane where u2 = 0
for ( int i = 0 ; i < totalNodes2D ; i++ )
bc_index[0].push_back(i*dim + 2);
// 2 points with pinned BC
bc_index[0].push_back( totalNodes2D*3*N[2] );
bc_index[0].push_back( totalNodes2D*3*N[2] + 1 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 2 );
bc_index[0].push_back( totalNodes2D*3*N[2] + (N[0]+1) * (N[1]) * 3 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 1 + (N[0]+1) * (N[1]) * 3 );
bc_index[0].push_back( totalNodes2D*3*N[2] + 2 + (N[0]+1) * (N[1]) * 3 );
// finer levels
for ( int lev = 1 ; lev < numLevels ; lev++ )
{
for( int i = 0 ; i < N.size() ; i++ )
{
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
N[i] *= 2;
}
totalNodes2D = nodesPerDim[0]*nodesPerDim[1];
// plane where u2 = 0
for ( int i = 0 ; i < totalNodes2D ; i++ )
bc_index[lev].push_back(i*dim + 2);
// 2 points with pinned BC
bc_index[lev].push_back( totalNodes2D*3*N[2] );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 1 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 2 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + (N[0]+1) * (N[1]) * 3 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 1 + (N[0]+1) * (N[1]) * 3 );
bc_index[lev].push_back( totalNodes2D*3*N[2] + 2 + (N[0]+1) * (N[1]) * 3 );
}
}
return bc_index;
}
__host__
void applyLoad(vector<double> &b, vector<size_t> N, size_t numLevels, size_t bc_case, size_t dim, double force)
{
if ( bc_case == 0 )
{
vector<size_t> nodesPerDim;
for ( int i = 0 ; i < N.size() ; i++)
nodesPerDim.push_back(N[i]+1);
size_t index = 0;
for ( int lev = 0 ; lev < numLevels - 1 ; lev++)
{
for ( int i = 0 ; i < N.size() ; i++)
nodesPerDim[i] = 2*nodesPerDim[i] - 1;
}
index = dim * nodesPerDim[0] * ( nodesPerDim[1] - 1 ) + 1;
b[index] = force;
if ( dim == 3 )
{
for ( int i = 1 ; i < nodesPerDim[2] ; i++ )
{
index = index + (nodesPerDim[0]*nodesPerDim[1])*dim;
b[index] = force;
}
}
}
else if ( bc_case == 1 )
{
if ( N.size() < 3 )
throw(runtime_error("Error : Load case 1 is not set up yet for 2D"));
// obtaining the finest grid's number of elements on the x-axis
size_t Nx_fine = N[0];
for ( int lev = 0 ; lev < numLevels - 1 ; lev++)
Nx_fine *= 2;
size_t index = (Nx_fine+1)*dim - 2;
b[index] = force;
}
}
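// NOTE (added usage sketch, not part of the original assembler): setting up the Dirichlet
// DOF indices and the load vector for bc_case 0 in 2D with a three-level multigrid
// hierarchy; the grid size and the force value below are made-up example numbers.
__host__
void bc_and_load_example()
{
	vector<size_t> N = {6, 3};      // elements per direction on the coarsest grid
	size_t numLevels = 3;
	size_t dim = 2;
	size_t bc_case = 0;

	// Dirichlet DOF indices for every level of the hierarchy
	vector<vector<size_t>> bc_index = applyBC(N, numLevels, bc_case, dim);

	// load vector on the finest grid: (4*Nx + 1) * (4*Ny + 1) * dim DOFs
	size_t num_rows = calcDOF(N[0]*4, N[1]*4, dim);
	vector<double> b(num_rows, 0.0);
	applyLoad(b, N, numLevels, bc_case, dim, -1.0);
}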
// adds local stiffness matrix of an element to the global stiffness matrix
__global__
void assembleGlobalStiffness_GPU(
size_t numElements, // total number of elements
size_t dim, // dimension
double* chi, // the updated design variable value of each element
double* A_local, // local stiffness matrix
size_t num_rows_l, // local stiffness matrix's number of rows
double* value, // global element's ELLPACK value vector
size_t* index, // global element's ELLPACK index vector
size_t max_row_size, // global element's ELLPACK maximum row size
size_t num_rows, // global element's ELLPACK number of rows
size_t* node_index, // vector that contains the corresponding global indices of the node's local indices
size_t p
)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < numElements )
{
int numNodesPerElement = pow(2,dim);
for ( int row = 0 ; row < num_rows_l ; row++ )
{
int y = dim*node_index[ (row/dim) + (id*numNodesPerElement) ] + ( row % dim );
for ( int col = 0 ; col < num_rows_l ; col++ )
{
int x = dim*node_index[ (col/dim) + (id*numNodesPerElement) ] + ( col % dim );
atomicAddAt( x, y, value, index, max_row_size, num_rows, pow(chi[id],p)*A_local[ ( col + row*num_rows_l ) ] );
}
}
}
}
// applies boundary condition on the global stiffness matrix (2d case) where the affected row/column is set to '0' and its diagonal to '1'
__global__
void applyMatrixBC2D_GPU(double* value, size_t* index, size_t max_row_size, size_t* bc_index, size_t num_rows, size_t bc_size, size_t Nx, size_t Ny, size_t dim)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < bc_size )
{
// assigning each thread to a single bc index
size_t bc = bc_index[id];
// setting the row entries to '0'
for ( int i = 0 ; i < max_row_size ; i++ )
{
value[ bc + i*num_rows ] = 0.0;
}
		// setting the column entries to '0' through the neighbouring nodes
		// (the diagonal itself is set to '1' in the "origin" block below)
int base_id = (bc - bc%dim);
bool south = ( bc >= (Nx + 1)*dim );
bool north = ( bc < (Nx+1)*(Ny)*dim );
bool west = ( (bc) % ((Nx + 1)*dim) >= dim );
bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 );
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// origin
{
// setting the diagonal to '1'
setAt( bc, bc, value, index, max_row_size, num_rows, 1.0 );
// and other DOFs on the node to '0'
for ( int i = 1 ; i < dim ; i++)
setAt( bc, bc + i, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( base_id == 0 || east )
{
for ( int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
		if ( base_id == 0 || (north && east) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
}
}
// applies boundary condition on the global stiffness matrix (3d case) where the affected row/column is set to '0' and its diagonal to '1'
__global__
void applyMatrixBC3D_GPU(double* value, size_t* index, size_t max_row_size, size_t* bc_index, size_t num_rows, size_t bc_size, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < bc_size )
{
// assigning each thread to a single bc index
size_t bc = bc_index[id];
// setting the row entries to '0'
for ( int i = 0 ; i < max_row_size ; i++ )
{
value[ bc + i*num_rows ] = 0.0;
}
// setting the column entries to '0' through the neighbouring nodes
		// boolean variables that return true if the neighbouring node exists (cf. fillIndexVector3D_GPU)
		size_t base_id = (bc - bc%dim);
		size_t gridsize_2D = (Nx+1)*(Ny+1)*dim;
		bool prev_layer = (bc >= (Nx+1)*(Ny+1)*dim);
		bool next_layer = (bc < (Nx+1)*(Ny+1)*(Nz)*dim);
		bool south = ((bc) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim);
		bool north = ((bc) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim);
		bool west = ((bc) % ((Nx + 1)*dim) >= dim);
		bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0);
//// previous layer
// south-west
if ( prev_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( prev_layer && south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( prev_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( prev_layer && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// origin
if ( prev_layer )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( prev_layer && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( prev_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( prev_layer && north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( prev_layer && north && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i - gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
//// current layer
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// origin
{
// setting the diagonal to '1'
setAt( bc, bc, value, index, max_row_size, num_rows, 1.0 );
// and other DOFs on the node to '0'
for ( int i = 1 ; i < dim ; i++)
setAt( bc, bc + i, value, index, max_row_size, num_rows, 0.0 );
}
// east
if ( base_id == 0 || east )
{
for ( int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i, value, index, max_row_size, num_rows, 0.0 );
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
if ( base_id == 0 || (north && east ) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i, value, index, max_row_size, num_rows, 0.0 );
}
//// next layer
// south-west
		if ( next_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south
		if ( next_layer && south )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// south-east
		if ( next_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - (dim * (Nx+1)) + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// west
		if ( next_layer && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// origin
		if ( next_layer )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// east
		if ( base_id == 0 || ( next_layer && east ) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-west
		if ( next_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) - dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north
		if ( next_layer && north )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
// north-east
		if ( base_id == 0 || ( next_layer && north && east ) )
{
for(int i = 0 ; i < dim ; i++)
setAt( bc, bc + (dim * (Nx+1)) + dim + i + gridsize_2D, value, index, max_row_size, num_rows, 0.0 );
}
}
}
// applies boundary condition on the prolongation matrix: entries in the boundary (coarse) columns are set to '0', while entries equal to '1' are kept
__global__
void applyProlMatrixBC_GPU( double* value, size_t* index, size_t max_row_size,
size_t* bc_index, size_t* bc_index_,
size_t num_rows, size_t num_rows_,
size_t bc_size, size_t bc_size_)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
for ( int row = 0 ; row < max_row_size ; row++ )
{
for ( int i = 0 ; i < bc_size_ ; i++ )
{
size_t bc_row = bc_index_[i];
if ( value[ id + row*num_rows ] != 1 && index[id + row*num_rows] == bc_row )
value[id + row*num_rows ] = 0;
}
}
}
}
// given a coarse node's index, returns the index of the corresponding node on the fine grid
__device__
size_t getFineNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
if ( dim == 3 )
{
size_t twoDimSize = (Nx+1)*(Ny+1);
size_t baseindex = index % twoDimSize;
size_t base_idx = baseindex % (Nx+1);
size_t fine2Dsize = (2*Nx+1)*(2*Ny+1);
size_t multiplier = index/twoDimSize;
return 2*base_idx + (baseindex/(Nx+1))*2*(2*Nx + 1) + 2*fine2Dsize*multiplier;
}
else
return (2 * (index / (Nx + 1)) * (2*Nx + 1) + 2*( index % (Nx+1)) );
}
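// Example (added for illustration): on a coarse 2D grid with Nx = 2, Ny = 2 the centre
// node (coarse index 4) maps to getFineNode_GPU(4, 2, 2, 0, 2)
//   = 2*(4/3)*(2*2+1) + 2*(4%3) = 10 + 2 = 12,
// which is the centre node of the once-refined grid (5 nodes per row: row 2, column 2).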
////////////////////////////////////////////
// SMOOTHERS
////////////////////////////////////////////
__global__ void Jacobi_Precond_GPU(double* c, double* value, size_t* index, size_t max_row_size, double* r, size_t num_rows, double damp){
int id = blockDim.x * blockIdx.x + threadIdx.x;
// B = damp / diag(A);
if ( id < num_rows )
c[id] = r[id] * damp / valueAt_(id, id, value, index, max_row_size, num_rows);
}
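// NOTE (added remark): the kernel above applies a damped Jacobi (diagonal) preconditioner,
// i.e. each correction entry is c_i = damp * r_i / A_ii; the sketch further below shows
// one way it can be combined with the CG kernels of the next section.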
////////////////////////////////////////////
// SOLVER
////////////////////////////////////////////
__global__
void checkIterationConditions(bool* foo, size_t* step, double* res, double* res0, double* m_minRes, double* m_minRed, size_t m_maxIter)
{
if ( *res > *m_minRes && *res > *m_minRed*(*res0) && (*step) <= m_maxIter )
{
*foo = true;
}
else
*foo = false;
}
__global__
void checkIterationConditionsBS(bool* foo, size_t* step, size_t m_maxIter, double* res, double* m_minRes)
{
if ( *res > 1e-12 && (*step) <= m_maxIter )
{
*foo = true;
}
else
*foo = false;
}
__global__
void printInitialResult_GPU(double* res0, double* m_minRes, double* m_minRed)
{
printf(" 0 %e %9.3e ----- -------- %9.3e \n", *res0, *m_minRes, *m_minRed);
}
/// r = b - A*x
__global__
void ComputeResiduum_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r,
double* b)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
// sum += value[offset] * x[ index[offset] ];
sum += value[offset] * __ldg( &x[ index[offset] ] );
}
r[id] = b[id] - sum;
}
}
/// r = r - A*x
__global__
void UpdateResiduum_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
sum += value[offset] * __ldg( &x[ index[offset] ] );
}
r[id] = r[id] - sum;
}
}
// Ax = r for transposed ELLPACK format
__global__ void Apply_GPU (
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value,
const std::size_t* index,
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
double sum = 0;
for ( int n = 0 ; n < max_row_size; n++ )
{
unsigned int offset = id + n*num_rows;
sum += value[offset] * x[ index[offset] ];
}
r[id] = sum;
}
}
/// r = A^T * x for transposed ELLPACK format
/// NOTE: This kernel should be run with A's number of rows as the number of threads
__global__
void ApplyTransposed_GPU(
const std::size_t num_rows,
const std::size_t max_row_size,
const double* value, // A's ELL value array
const std::size_t* index, // A's ELL index array
const double* x,
double* r)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
for ( int n = 0; n < max_row_size; n++ )
{
int col = index [ id + n*num_rows ];
double val = value [ id + n*num_rows ];
#if __CUDA_ARCH__ < 600
atomicAdd_double( &r[col], val*x[id] );
#else
atomicAdd( &r[col], val*x[id] );
#endif
}
}
}
// outputs result in the terminal
__global__
void printResult_GPU(size_t* step, double* res, double* m_minRes, double* lastRes, double* res0, double* m_minRed)
{
if(*step < 10)
printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed);
else
printf(" %d %e %9.3e %9.3e %e %9.3e \n", *step, *res, *m_minRes, (*res)/(*lastRes), (*res)/(*res0), *m_minRed);
}
// increases the iteration step
__global__ void addStep(size_t* step){
++(*step);
}
// p = z + p * (rho / rho_old);
__global__
void calculateDirectionVector(
size_t* d_step,
double* d_p,
double* d_z,
double* d_rho,
double* d_rho_old,
size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
{
if(*d_step == 1)
{
d_p[id] = d_z[id];
}
else
{
// p *= (rho / rho_old)
d_p[id] = d_p[id] * ( *d_rho / (*d_rho_old) );
// p += z;
d_p[id] = d_p[id] + d_z[id];
}
}
}
// d_alpha = *d_rho / ( d_p * d_z )
__host__
void calculateAlpha(
double* d_alpha,
double* d_rho,
double* d_p,
double* d_z,
double* d_alpha_temp,
size_t num_rows,
dim3 gridDim,
dim3 blockDim)
{
setToZero<<<1,1>>>( d_alpha_temp, 1);
// alpha_temp = ( p * z )
dotProduct(d_alpha_temp, d_p, d_z, num_rows, gridDim, blockDim);
// d_alpha = *d_rho / (*alpha_temp)
divide_GPU<<<1,1>>>(d_alpha, d_rho, d_alpha_temp);
}
// x = x + alpha * p
__global__
void axpy_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
d_x[id] += (*d_alpha * d_p[id]);
}
// x = x - alpha * p
__global__
void axpy_neg_GPU(double* d_x, double* d_alpha, double* d_p, size_t num_rows)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < num_rows )
d_x[id] = d_x[id] - (*d_alpha * d_p[id]);
}
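// NOTE (added sketch, not part of the original solver): one possible way of combining the
// kernels above into a single Jacobi-preconditioned CG iteration. All device pointers are
// assumed to be allocated by the caller, d_step is assumed to start at 0 and d_r is assumed
// to hold the current residual (e.g. from ComputeResiduum_GPU); d_z doubles as temporary
// storage for A*p. How the accompanying solver classes actually chain these kernels may
// differ in detail.
__host__
void pcg_iteration_sketch( double* d_value, size_t* d_index, size_t max_row_size,
                           double* d_x, double* d_r, double* d_z, double* d_p,
                           double* d_rho, double* d_rho_old, double* d_alpha,
                           double* d_alpha_temp, double* d_res, size_t* d_step,
                           size_t num_rows, double damp )
{
	dim3 gridDim;
	dim3 blockDim;
	calculateDimensions(num_rows, gridDim, blockDim);

	// increase the iteration counter first so that step == 1 selects p = z below
	addStep<<<1,1>>>(d_step);

	// z = B * r with the damped Jacobi preconditioner B = damp / diag(A)
	Jacobi_Precond_GPU<<<gridDim, blockDim>>>(d_z, d_value, d_index, max_row_size, d_r, num_rows, damp);

	// rho = r * z
	dotProduct(d_rho, d_r, d_z, num_rows, gridDim, blockDim);

	// p = z + p * (rho / rho_old)    (p = z in the first step)
	calculateDirectionVector<<<gridDim, blockDim>>>(d_step, d_p, d_z, d_rho, d_rho_old, num_rows);

	// z is reused as a temporary for A*p
	Apply_GPU<<<gridDim, blockDim>>>(num_rows, max_row_size, d_value, d_index, d_p, d_z);

	// alpha = rho / ( p * (A*p) )
	calculateAlpha(d_alpha, d_rho, d_p, d_z, d_alpha_temp, num_rows, gridDim, blockDim);

	// x = x + alpha * p  and  r = r - alpha * (A*p)
	axpy_GPU<<<gridDim, blockDim>>>(d_x, d_alpha, d_p, num_rows);
	axpy_neg_GPU<<<gridDim, blockDim>>>(d_r, d_alpha, d_z, num_rows);

	// keep rho for the next iteration and measure the new residual norm
	equals_GPU<<<1,1>>>(d_rho_old, d_rho);
	norm_GPU(d_res, d_r, num_rows, gridDim, blockDim);
}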
//// TDO
// calculates the driving force of all elements
// one thread computes one element
// df[] = 0.5 * p * pow(chi[], p-1) / local_volume * u[]^T * A * u[]
__global__
void calcDrivingForce( double *df, // driving force
double *u, // displacement vector
double* chi, // design variable
double p, // penalization parameter
size_t* node_index, // node index array
double* d_A_local, // local stiffness matrix
size_t num_rows, // num_rows of local stiffness matrix
size_t dim, // dimension
double local_volume,
size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < numElements)
{
double temp[24];
size_t numNodesPerElement = pow(2,dim);
df[id] = 0;
for ( int n = 0; n < num_rows; n++ )
{
temp[n]=0;
for ( int m = 0; m < num_rows; m++)
{
// converts local node to global node
int global_col = ( node_index [ (m / dim) + id*numNodesPerElement ] * dim ) + ( m % dim );
temp[n] += u[global_col] * d_A_local[ n + m*num_rows ];
}
}
for ( int n = 0; n < num_rows; n++ )
{
int global_col = ( node_index [ (n / dim) + id*numNodesPerElement ] * dim ) + ( n % dim );
df[id] += temp[n] * u[global_col];
}
df[id] *= 0.5 * p * pow(chi[id], p-1) / local_volume;
}
}
// sum = sum(x)
// n = size of x vector
__global__
void sumOfVector_GPU(double* sum, double* x, size_t n)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < n)
{
temp += x[id];
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
// laplacian for both 2d and 3d cases
// for 2d, Nz has to be predefined to '1'
__device__
double laplacian_GPU( double *array, size_t ind, size_t Nx, size_t Ny, size_t Nz, double h )
{
bool east = ( (ind + 1) % Nx != 0 );
bool north = ( ind + Nx < Nx*Ny );
bool west = ( ind % Nx != 0 );
bool south = ( ind >= Nx );
bool previous_layer = (ind >= Nx*Ny);
bool next_layer = (ind < Nx*Ny*(Nz-1));
double value = -4.0 * array[ind];
// east element
if ( east )
value += 1.0 * array[ind + 1];
else
value += 1.0 * array[ind];
// north element
if ( north )
value += 1.0 * array[ind + Nx];
else
value += 1.0 * array[ind];
// west element
if ( west )
value += 1.0 * array[ind - 1];
else
value += 1.0 * array[ind];
// south element
if ( south )
value += 1.0 * array[ind - Nx];
else
value += 1.0 * array[ind];
// if 3D
if (Nz > 0)
{
value -= 2.0 * array[ind];
// previous layer's element
if ( previous_layer )
value += 1.0 * array[ind - (Nx*Ny)];
else
value += 1.0 * array[ind];
if ( next_layer )
value += 1.0 * array[ind + (Nx*Ny)];
else
value += 1.0 * array[ind];
}
return value/(h*h);
}
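// NOTE (added remark): for an interior element the expression above is the standard
// 5-point (2D) / 7-point (3D) finite-difference Laplacian divided by h^2; at the domain
// boundary the missing neighbour is replaced by the centre value itself, which acts like
// a zero-Neumann condition on the design variable chi.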
__global__
void calcLambdaUpper(double *df_array, double *max, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[1024];
*max = -1.0e9;
*mutex = 0;
double temp = -1.0e9;
while(index + offset < numElements){
temp = fmaxf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) );
// temp = fmaxf(temp, ( df_array[index + offset] + *eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fmaxf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*max = fmaxf(*max, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
__global__
void calcLambdaLower(double *df_array, double *min, int *mutex, double* beta, double *chi, double* eta, int Nx, int Ny, int Nz, unsigned int numElements, double h)
{
unsigned int index = threadIdx.x + blockIdx.x*blockDim.x;
unsigned int stride = gridDim.x*blockDim.x;
unsigned int offset = 0;
__shared__ double cache[1024];
*min = 1.0e9;
*mutex = 0;
double temp = 1.0e9;
if ( index < numElements )
{
while(index + offset < numElements){
temp = fminf(temp, ( df_array[index + offset] + ( *beta * laplacian_GPU( chi, index, Nx, Ny, Nz, h ) ) ) );
// temp = fminf(temp, ( df_array[index + offset] - *eta ) );
offset += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] = fminf(cache[threadIdx.x], cache[threadIdx.x + i]);
}
__syncthreads();
i /= 2;
}
if(threadIdx.x == 0){
while(atomicCAS(mutex,0,1) != 0); //lock
*min = fminf(*min, cache[0]);
atomicExch(mutex, 0); //unlock
}
}
}
__global__
void calcChiTrial(
double *chi,
double *df,
double *lambda_trial,
double del_t,
double* eta,
double* beta,
double* chi_trial,
size_t Nx,
size_t Ny,
size_t Nz,
size_t numElements,
double h
)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < numElements )
{
double del_chi;
del_chi = ( del_t / *eta ) * ( df[id] - *lambda_trial + (*beta)*( laplacian_GPU( chi, id, Nx, Ny, Nz, h ) ) );
if ( del_chi + chi[id] > 1 )
chi_trial[id] = 1;
else if ( del_chi + chi[id] < 1e-9 )
chi_trial[id] = 1e-9;
else
chi_trial[id] = del_chi + chi[id];
}
}
__global__
void calcLambdaTrial(double *rho_trial, double rho, double *lambda_l, double *lambda_u, double *lambda_trial)
{
if ( *rho_trial > rho )
*lambda_l = *lambda_trial;
else
*lambda_u = *lambda_trial;
*lambda_trial = 0.5 * ( *lambda_l + *lambda_u );
}
__global__ void calcRhoTrial(double* rho_tr, double local_volume, size_t numElements)
{
double total_volume = local_volume * numElements;
*rho_tr *= local_volume;
*rho_tr /= total_volume;
}
// calculates the average weighted driving force, p_w (single-block version: assumes numElements <= 1024; the host function calcP_w() further below is the general reduction-based variant)
__global__
void calcP_w_GPU(double* p_w, double* df, double* uTAu, double* chi, int p, double local_volume, size_t numElements)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double int_g_p[1024];
__shared__ double int_g[1024];
if( id < numElements)
{
df[id] = uTAu[id] * ( local_volume / (2*local_volume) ) * p * pow(chi[id], p - 1);
int_g_p[id] = (chi[id] - 1e-9)*(1-chi[id]) * df[id] * local_volume;
int_g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
__syncthreads();
if ( id == 0 )
{
for ( int i = 1 ; i < numElements ; ++i )
int_g_p[0] += int_g_p[i];
}
if ( id == 1 )
{
for ( int i = 1 ; i < numElements ; ++i )
int_g[0] += int_g[i];
}
__syncthreads();
if ( id == 0 )
*p_w = int_g_p[0] / int_g[0];
}
}
__global__
void calc_g_GPU(double*g, double* chi, size_t numElements, double local_volume)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if (id < numElements)
{
g[id] = (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
// if ( id == 0 )
// printf("%f\n", g[id]);
}
}
__global__
void calc_g_GPU_(double* sum, double* chi, size_t numElements, double local_volume)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < numElements )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += (chi[id] - 1e-9)*(1-chi[id]) * local_volume;
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
// sum = sum ( df * g * local_volume)
__global__
void calcSum_df_g_GPU(double* sum, double* df, double* g, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += df[id]*g[id]; // local volume is already included in g, i.e. g = g*local_volume
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__host__
void calcP_w(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, double* g, double* df_g, size_t numElements, double local_volume)
{
dim3 gridDim;
dim3 blockDim;
calculateDimensions(numElements, gridDim, blockDim);
// calculate g of each element * local_volume
calc_g_GPU<<<gridDim, blockDim>>>(g, chi, numElements, local_volume);
// calculate sum_g = sum(g)
sumOfVector_GPU<<<gridDim, blockDim>>>(sum_g, g, numElements);
// sum_df_g = sum( g[i]*df[i]*local_volume )
calcSum_df_g_GPU<<<gridDim, blockDim>>>(sum_df_g, df, g, numElements);
// p_w = sum_df_g / sum_g
divide_GPU<<<1,1>>>(p_w, sum_df_g, sum_g);
}
// sum = sum ( df * g * local_volume)
__global__
void calcSum_df_g_GPU_(double* sum, double* df, double* chi, size_t numElements, double local_volume)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
// if ( id < n )
// printf("%d : %e\n", id, x[id]);
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += df[id]* ( (chi[id] - 1e-9)*(1-chi[id]) * local_volume );
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
__host__
void calcP_w_(double* p_w, double* sum_g, double* sum_df_g, double* df, double* chi, size_t numElements, double local_volume)
{
dim3 gridDim;
dim3 blockDim;
calculateDimensions(numElements, gridDim, blockDim);
// calculate g of each element * local_volume
// calculate sum_g = sum(g)
calc_g_GPU_<<<gridDim, blockDim>>>(sum_g, chi, numElements, local_volume);
// sum_df_g = sum( g[i]*df[i]*local_volume )
calcSum_df_g_GPU_<<<gridDim, blockDim>>>(sum_df_g, df, chi, numElements, local_volume);
// p_w = sum_df_g / sum_g
divide_GPU<<<1,1>>>(p_w, sum_df_g, sum_g);
}
// two threads to calculate eta and beta
__global__ void calcEtaBeta( double* eta, double* beta, double etastar, double betastar, double* p_w )
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id == 0 )
*eta = etastar * (*p_w);
if ( id == 1 )
*beta = betastar * (*p_w);
}
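// NOTE (added usage sketch, not part of the original update routine): computing the weighted
// average driving force p_w and, from it, the regularisation parameters eta and beta; all
// device pointers are assumed to be allocated by the caller, etastar and betastar are the
// user-defined scaling factors.
__host__
void etabeta_example( double* d_eta, double* d_beta, double etastar, double betastar,
                      double* d_p_w, double* d_sum_g, double* d_sum_df_g,
                      double* d_df, double* d_chi, size_t numElements, double local_volume )
{
	// the reduction kernels accumulate with atomicAdd, so the sums are zeroed first
	setToZero<<<1,1>>>(d_sum_g, 1);
	setToZero<<<1,1>>>(d_sum_df_g, 1);

	// p_w = sum( g*df ) / sum( g ),  with g = (chi - 1e-9)*(1 - chi)*local_volume
	calcP_w_(d_p_w, d_sum_g, d_sum_df_g, d_df, d_chi, numElements, local_volume);

	// eta = etastar * p_w and beta = betastar * p_w (one thread per parameter)
	calcEtaBeta<<<1,2>>>(d_eta, d_beta, etastar, betastar, d_p_w);
}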
// convergence check in for the bisection algorithm in the density update process
__global__ void checkTDOConvergence(bool* foo, double rho, double* rho_trial)
{
if ( abs(rho - *rho_trial) < 1e-7 )
*foo = false;
else
*foo = true;
}
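// NOTE (added sketch, not part of the original update routine): one possible bisection loop
// over the Lagrange multiplier, built only from the kernels above. All device pointers are
// assumed to be allocated by the caller, rho is the prescribed volume fraction, del_t the
// pseudo time step, h the mesh size and local_volume the volume of one element. The actual
// density update routine of the project may organise these calls differently.
__host__
void bisection_sketch( double* d_chi, double* d_chi_trial, double* d_df,
                       double* d_lambda_l, double* d_lambda_u, double* d_lambda_trial,
                       double* d_rho_trial, double* d_eta, double* d_beta,
                       int* d_mutex, bool* d_foo,
                       double rho, double del_t, double h, double local_volume,
                       size_t Nx, size_t Ny, size_t Nz, size_t numElements )
{
	dim3 gridDim;
	dim3 blockDim;
	calculateDimensions(numElements, gridDim, blockDim);

	// lambda_u = max( df + beta*laplacian(chi) ) and lambda_l = min( ... )
	calcLambdaUpper<<<gridDim, blockDim>>>(d_df, d_lambda_u, d_mutex, d_beta, d_chi, d_eta, Nx, Ny, Nz, numElements, h);
	calcLambdaLower<<<gridDim, blockDim>>>(d_df, d_lambda_l, d_mutex, d_beta, d_chi, d_eta, Nx, Ny, Nz, numElements, h);

	// initial trial value: lambda_trial = 0.5 * ( lambda_l + lambda_u )
	double h_l, h_u, h_trial;
	CUDA_CALL( cudaMemcpy(&h_l, d_lambda_l, sizeof(double), cudaMemcpyDeviceToHost) );
	CUDA_CALL( cudaMemcpy(&h_u, d_lambda_u, sizeof(double), cudaMemcpyDeviceToHost) );
	h_trial = 0.5 * ( h_l + h_u );
	CUDA_CALL( cudaMemcpy(d_lambda_trial, &h_trial, sizeof(double), cudaMemcpyHostToDevice) );

	bool h_foo = true;
	while ( h_foo )
	{
		// chi_trial = clamp( chi + del_t/eta * ( df - lambda_trial + beta*laplacian(chi) ) )
		calcChiTrial<<<gridDim, blockDim>>>(d_chi, d_df, d_lambda_trial, del_t, d_eta, d_beta, d_chi_trial, Nx, Ny, Nz, numElements, h);

		// rho_trial = sum(chi_trial) * local_volume / total_volume
		setToZero<<<1,1>>>(d_rho_trial, 1);
		sumOfVector_GPU<<<gridDim, blockDim>>>(d_rho_trial, d_chi_trial, numElements);
		calcRhoTrial<<<1,1>>>(d_rho_trial, local_volume, numElements);

		// narrow the bisection interval and check |rho - rho_trial| < tolerance
		calcLambdaTrial<<<1,1>>>(d_rho_trial, rho, d_lambda_l, d_lambda_u, d_lambda_trial);
		checkTDOConvergence<<<1,1>>>(d_foo, rho, d_rho_trial);
		CUDA_CALL( cudaMemcpy(&h_foo, d_foo, sizeof(bool), cudaMemcpyDeviceToHost) );
	}

	// accept the converged trial field: chi = chi_trial
	vectorEquals_GPU<<<gridDim, blockDim>>>(d_chi, d_chi_trial, numElements);
}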
// computes and fills in the global stiffness matrix's ELL index array for 2d case
__global__ void fillIndexVector2D_GPU(size_t* index, size_t Nx, size_t Ny, size_t max_row_size, size_t num_rows)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
int counter = 0;
int dim = 2;
if ( id < num_rows )
{
int base_id = (id - id%dim);
// south-west
if ( id >= (Nx + 1)*dim && (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i;
counter++;
}
}
// south
if ( id >= (Nx + 1)*dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i;
counter++;
}
}
// south-east
if ( id >= (Nx + 1)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i;
counter++;
}
}
// west
if ( (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i;
counter++;
}
}
// origin
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i;
counter++;
}
// east
if ( base_id == 0 || (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i;
counter++;
}
}
// north-west
if ( id < (Nx+1)*(Ny)*dim && (id) % ((Nx + 1)*dim) >= dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i;
counter++;
}
}
// north
if ( id < (Nx+1)*(Ny)*dim )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i;
counter++;
}
}
// north-east
if ( base_id == 0 || id < (Nx+1)*(Ny)*dim && (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i;
counter++;
}
}
for ( int i = counter ; i < max_row_size; i++)
{
index[id + i*num_rows] = num_rows;
}
}
}
// computes and fills in the global stiffness matrix's ELL index array for 3d case
__global__ void fillIndexVector3D_GPU(size_t* index, size_t Nx, size_t Ny, size_t Nz, size_t max_row_size, size_t num_rows)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
int counter = 0;
int dim = 3;
if ( id < num_rows )
{
size_t base_id = (id - id%dim);
size_t gridsize_2D = (Nx+1)*(Ny+1)*dim;
// boolean variables that returns true if the neighbouring node exists
bool prev_layer = (id >= (Nx+1)*(Ny+1)*dim);
bool next_layer = (id < (Nx+1)*(Ny+1)*(Nz)*dim);
bool south = ((id) % ((Nx + 1)*(Ny + 1)*dim) >= (Nx + 1)*dim);
bool north = ((id) % ((Nx + 1)*(Ny + 1)*dim) < (Nx+1)*(Ny)*dim);
bool west = ((id) % ((Nx + 1)*dim) >= dim);
bool east = ((base_id) % ((Nx*dim) + (base_id/(3*(Nx+1)))*dim*(Nx+1)) != 0);
//// previous layer
// south-west
if ( prev_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i - gridsize_2D;
counter++;
}
}
// south
if ( prev_layer && south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i - gridsize_2D;
counter++;
}
}
// south-east
if ( prev_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i - gridsize_2D;
counter++;
}
}
// west
if ( prev_layer && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i - gridsize_2D;
counter++;
}
}
// origin
if ( prev_layer )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i - gridsize_2D;
counter++;
}
}
// east
if ( prev_layer && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i - gridsize_2D;
counter++;
}
}
// north-west
if ( prev_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i - gridsize_2D;
counter++;
}
}
// north
if ( prev_layer && north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i - gridsize_2D;
counter++;
}
}
// north-east
if ( prev_layer && north && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i - gridsize_2D;
counter++;
}
}
//// current layer
// south-west
if ( south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i;
counter++;
}
}
// south
if ( south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i;
counter++;
}
}
// south-east
if ( south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i;
counter++;
}
}
// west
if ( west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i;
counter++;
}
}
// origin
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i;
counter++;
}
// east
if ( base_id == 0 || east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i;
counter++;
}
}
// north-west
if ( north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i;
counter++;
}
}
// north
if ( north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i;
counter++;
}
}
// north-east
if ( base_id == 0 || (north && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i;
counter++;
}
}
//// next layer
// south-west
if ( next_layer && south && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim - dim + i + gridsize_2D;
counter++;
}
}
// south
if ( next_layer && south )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + i + gridsize_2D;
counter++;
}
}
// south-east
if ( next_layer && south && east )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - (Nx+1)*dim + dim + i + gridsize_2D;
counter++;
}
}
// west
if ( next_layer && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) - dim + i + gridsize_2D;
counter++;
}
}
// origin
if ( next_layer )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + i + gridsize_2D;
counter++;
}
}
// east
if ( base_id == 0 || ( next_layer && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + dim + i + gridsize_2D;
counter++;
}
}
// north-west
if ( next_layer && north && west )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim - dim + i + gridsize_2D;
counter++;
}
}
// north
if ( next_layer && north )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + i + gridsize_2D;
counter++;
}
}
// north-east
if ( base_id == 0 || (next_layer && north && east ) )
{
for(int i = 0 ; i < dim ; i++)
{
index[id + counter*num_rows] = (id - id%dim) + (Nx+1)*dim + dim + i + gridsize_2D;
counter++;
}
}
for ( int i = counter ; i < max_row_size; i++)
{
index[id + i*num_rows] = num_rows;
}
}
}
// assembles the prolongation matrix for 2d case
// the ELL value and index arrays are calculated and filled
__global__ void fillProlMatrix2D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t p_max_row_size, size_t num_rows, size_t num_cols)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < num_rows )
{
int counter = 0;
int dim = 2;
// coarse grid
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
size_t base_id = (id - id%dim);
size_t node_index = base_id / dim;
int coarse_node_index = getCoarseNode_GPU(node_index, Nx, Ny, 0, dim);
// if node is even numbered
bool condition1 = (node_index % 2 == 0 );
// if node exists in the coarse grid
bool condition2 = ( node_index % ((Nx+1)*2) < (Nx + 1) );
bool south = ( id >= (Nx + 1)*dim );
bool west = ( (id) % ((Nx + 1)*dim) >= dim );
bool east = ( (base_id) % ((Nx*dim) + (base_id/(2*(Nx+1)))*dim*(Nx+1)) != 0 );
bool north = ( id < (Nx+1)*(Ny)*dim );
// if there exists a coarse node in the same location
if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, 0, dim) == node_index )
{
p_index[id + counter*num_rows] = coarse_node_index*dim + id%dim;
p_value[id + counter*num_rows] = 1;
counter++;
}
else
{
// south-west
if ( south && condition1 && !condition2 && west )
{
size_t south_west_fine_node = (node_index - (Nx+1) - 1);
size_t south_west_coarse_node = getCoarseNode_GPU(south_west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// south
if ( south && !condition1 && !condition2 )
{
size_t south_fine_node = (node_index - (Nx+1) );
size_t south_coarse_node = getCoarseNode_GPU(south_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// south-east
if ( south && condition1 && !condition2 && east )
{
size_t south_east_fine_node = (node_index - (Nx+1) + 1);
size_t south_east_coarse_node = getCoarseNode_GPU(south_east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = south_east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// west
if ( west && condition2 )
{
size_t west_fine_node = (node_index - 1);
size_t west_coarse_node = getCoarseNode_GPU(west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// east
if ( east && condition2 )
{
size_t east_fine_node = (node_index + 1);
size_t east_coarse_node = getCoarseNode_GPU(east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north-west
if ( north && condition1 && !condition2 && west )
{
size_t north_west_fine_node = (node_index + (Nx+1) - 1);
size_t north_west_coarse_node = getCoarseNode_GPU(north_west_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_west_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// north
if ( north && !condition1 && !condition2 )
{
size_t north_fine_node = (node_index + (Nx+1) );
size_t north_coarse_node = getCoarseNode_GPU(north_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north-east
if ( north && condition1 && !condition2 && east )
{
size_t north_east_fine_node = (node_index + (Nx+1) + 1);
size_t north_east_coarse_node = getCoarseNode_GPU(north_east_fine_node, Nx, Ny, 0, dim);
p_index[id + counter*num_rows] = north_east_coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
}
// remaining entries are filled with num_cols
for ( int i = counter ; i < p_max_row_size; i++)
{
p_index[id + i*num_rows] = num_cols;
}
}
}
// assembles the prolongation matrix for 3d case
// the ELL value and index arrays are calculated and filled
__global__ void fillProlMatrix3D_GPU(double* p_value, size_t* p_index, size_t Nx, size_t Ny, size_t Nz, size_t p_max_row_size, size_t num_rows, size_t num_cols)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
if ( id < num_rows )
{
int counter = 0;
int dim = 3;
// coarse grid
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
size_t Nz_ = Nz / 2;
size_t base_id = (id - id%dim);
size_t id_2D = (id) % ((Nx+1)*(Ny+1)*dim);
size_t node_index = base_id / dim;
int coarse_node_index = getCoarseNode3D_GPU(node_index, Nx, Ny, Nz);
size_t numNodes2D = (Nx+1)*(Ny+1);
// if node is even numbered
bool condition1 = ( node_index % 2 == 0 );
bool condition5 = ( (id_2D/dim) % ((Nx+1)*2) < (Nx+1) );
bool condition6 = ( node_index % (numNodes2D*2) < (Nx+1)*(Ny+1) );
// if there exists a coarse node in the same location
if ( getFineNode_GPU(coarse_node_index, Nx_, Ny_, Nz_, dim) == node_index )
{
p_index[id + counter*num_rows] = coarse_node_index*dim + id%dim;
p_value[id + counter*num_rows] = 1;
counter++;
}
// diagonals
else if ( !condition1 && !condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-south-west
fine_node = (node_index - numNodes2D - (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-south-east
fine_node = (node_index - numNodes2D - (Nx+1) + 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-north-west
fine_node = (node_index - numNodes2D + (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// previous-north-east
fine_node = (node_index - numNodes2D + (Nx+1) + 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-south-west
fine_node = (node_index + numNodes2D - (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-south-east
fine_node = (node_index + numNodes2D - (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-north-west
fine_node = (node_index + numNodes2D + (Nx+1) - 1 );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
// next-north-east
fine_node = (node_index + numNodes2D + (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.125 ;
counter++;
}
// diagonals on x-z plane
else if ( condition1 && condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-west
fine_node = (node_index - (Nx+1)*(Ny+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// previous-east
fine_node = (node_index - (Nx+1)*(Ny+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-west
fine_node = (node_index + (Nx+1)*(Ny+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-east
fine_node = (node_index + (Nx+1)*(Ny+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// diagonals in x-y plane
else if ( condition1 && !condition5 && condition6 )
{
size_t fine_node;
size_t coarse_node;
// south-west
fine_node = (node_index - (Nx+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// south-east
fine_node = (node_index - (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
		// north-west
fine_node = (node_index + (Nx+1) - 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// north-east
fine_node = (node_index + (Nx+1) + 1);
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
// diagonals in y-z plane
else if ( condition1 && !condition5 && !condition6 )
{
size_t fine_node;
size_t coarse_node;
// previous-south
fine_node = (node_index - (Nx+1)*(Ny+1) - (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// previous-north
fine_node = (node_index - (Nx+1)*(Ny+1) + (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-south
fine_node = (node_index + (Nx+1)*(Ny+1) - (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
// next-north
fine_node = (node_index + (Nx+1)*(Ny+1) + (Nx+1) );
coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.25 ;
counter++;
}
else
{
// previous-origin
if ( !condition1 && condition5 && !condition6 )
{
// printf("%lu\n", node_index*dim );
size_t fine_node = (node_index - (Nx+1)*(Ny+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// next-origin
if ( !condition1 && condition5 && !condition6 )
{
// printf("%lu\n", node_index*dim );
size_t fine_node = (node_index + (Nx+1)*(Ny+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// south
if ( !condition1 && !condition5 && condition6 )
{
size_t fine_node = (node_index - (Nx+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// west
if ( !condition1 && condition5 && condition6 )
{
// printf("%lu\n", node_index*3 );
size_t fine_node = (node_index - 1);
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// east
if ( !condition1 && condition5 && condition6 )
{
size_t fine_node = (node_index + 1);
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
// north
if ( !condition1 && !condition5 && condition6 )
{
size_t fine_node = (node_index + (Nx+1));
size_t coarse_node = getCoarseNode3D_GPU(fine_node, Nx, Ny, Nz);
p_index[id + counter*num_rows] = coarse_node*dim + id%dim ;
p_value[id + counter*num_rows] = 0.5 ;
counter++;
}
}
for ( int i = counter ; i < p_max_row_size; i++)
{
p_index[id + i*num_rows] = num_cols;
}
}
}
// obtaining a node's corresponding node on a coarser grid
__device__ int getCoarseNode_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz, size_t dim)
{
// get coarse grid dimensions
size_t Nx_ = Nx / 2;
// size_t Ny_ = Ny / 2;
// size_t Nz_ = Nz / 2;
// if node is even numbered
bool condition1 = (index % 2 == 0 );
// if node exists in the coarse grid
bool condition2 = ( index % ((Nx+1)*2) < (Nx + 1) );
if ( condition1 && condition2 )
{
return index/2 - (index/((Nx+1)*2 ))*(Nx_);
}
// -1 means the node in the coarse grid does not exist
else
return -1;
}
__device__ int getCoarseNode3D_GPU(size_t index, size_t Nx, size_t Ny, size_t Nz)
{
// get coarse grid dimensions
size_t Nx_ = Nx / 2;
size_t Ny_ = Ny / 2;
// size_t Nz_ = Nz / 2;
size_t gridsize2D = (Nx+1)*(Ny+1);
size_t gridsize2D_ = (Nx_+1)*(Ny_+1);
// if node is even numbered
bool condition1 = ( index % 2 == 0 );
// if node exists in the coarse grid (x-y-plane)
bool condition2 = ( index % ((Nx+1)*2) < (Nx + 1) );
// if node exists in the coarse grid (y-z-plane)
bool condition3 = ( index % ((Nx+1)*(Ny+1)*2) < (Nx+1)*(Ny+1) );
if ( condition1 && condition2 && condition3 )
{
int base_id = index % gridsize2D;
return base_id/2 - (base_id/((Nx+1)*2 ))*(Nx_) + (index/(gridsize2D*2))*gridsize2D_;
// return index/2 - (index/((Nx+1)*2 ))*(Nx_);
}
// -1 means the node in the coarse grid does not exist
else
return -1;
}
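// Worked example: for Nx = Ny = Nz = 2 (a 3x3x3 fine grid) the even-numbered fine
// nodes 0, 2, 6, 8, 18, 20, 24, 26 map to coarse nodes 0..7, and every other fine
// node returns -1.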
// DEBUG: check to ensure mass is conserved during the density update process
__global__ void checkMassConservation(double* chi, double local_volume, size_t numElements)
{
unsigned int id = threadIdx.x + blockIdx.x*blockDim.x;
__shared__ double temp[1024];
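// NOTE: temp is indexed with the global thread id, so this debug kernel assumes a
// single-block launch with numElements <= blockDim.x (<= 1024).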
if ( id < numElements)
{
// sum of chi * local_volume
temp[id] = chi[id] * local_volume;
}
__syncthreads();
if ( id == 0 )
{
for ( int i = 1 ; i < numElements ; i++ )
{
temp[0] += temp[i];
}
// total volume
double vol = local_volume * numElements;
printf("chi_trial %f\n", temp[0] / vol);
}
}
// adds the value to a transposed ELLPack matrix A at (row,col)
__device__
void atomicAddAt( size_t row, size_t col, double* vValue, size_t* vIndex, size_t max_row_size, size_t num_rows, double value )
{
for(size_t k = 0; k < max_row_size; ++k)
{
if(vIndex[k * num_rows + col] == row)
{
atomicAdd( &vValue[k * num_rows + col] , value );
k = max_row_size; // to exit for loop
}
}
}
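// Read-only counterpart (a sketch, not part of the original code): looks up the entry
// at (row,col) with the same transposed ELLPACK indexing as atomicAddAt above and
// returns 0.0 if the entry is not stored.
__device__
double valueAt( size_t row, size_t col, const double* vValue, const size_t* vIndex, size_t max_row_size, size_t num_rows )
{
    for(size_t k = 0; k < max_row_size; ++k)
    {
        if(vIndex[k * num_rows + col] == row)
            return vValue[k * num_rows + col];
    }
    return 0.0;
}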
// A_coarse = P^T * A_fine * P
// A : fine stiffness matrix
// A_ : coarse stiffness matrix
// P : prolongation matrix
__global__ void PTAP(double* A_value, size_t* A_index, size_t max_row_size, size_t num_rows,
double* A_value_, size_t* A_index_, size_t max_row_size_, size_t num_rows_,
double* P_value, size_t* P_index, size_t p_max_row_size)
{
int k = blockDim.x * blockIdx.x + threadIdx.x;
if( k < num_rows )
{
for ( int i_ = 0 ; i_ < p_max_row_size ; i_++ )
{
size_t i = P_index[k + i_*num_rows];
double P_ki = P_value[k + i_*num_rows];
for( int l_ = 0 ; l_ < max_row_size ; l_++ )
{
size_t l = A_index[k + l_*num_rows];
double A_kl = A_value[k + l_*num_rows];
double P_ki_A_kl = P_ki * A_kl;
for( int j_ = 0 ; j_ < p_max_row_size ; j_++ )
{
size_t j = P_index[l + j_*num_rows];
double P_lj = P_value[l + j_*num_rows];
double P_ki_A_kl_P_lj = P_ki_A_kl * P_lj;
if(P_ki_A_kl_P_lj != 0.0)
atomicAddAt( j, i, A_value_, A_index_, max_row_size_, num_rows_, P_ki_A_kl_P_lj );
}
}
}
}
}
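// Host-side launch sketch (not part of the original code): one thread per fine row.
// Assumes the coarse-matrix values A_value_ have been zero-initialised beforehand,
// since PTAP only accumulates into them via atomicAddAt.
inline void launchPTAP(double* A_value, size_t* A_index, size_t max_row_size, size_t num_rows,
                       double* A_value_, size_t* A_index_, size_t max_row_size_, size_t num_rows_,
                       double* P_value, size_t* P_index, size_t p_max_row_size)
{
    dim3 block(1024);
    dim3 grid((num_rows + block.x - 1) / block.x);
    PTAP<<<grid, block>>>(A_value, A_index, max_row_size, num_rows,
                          A_value_, A_index_, max_row_size_, num_rows_,
                          P_value, P_index, p_max_row_size);
}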
// calculation of compliance, c = 0.5 * sum( u^T * K * u )
// c is labelled as sum
__global__
void calcCompliance(double* sum, double* u, double* chi, size_t* node_index, double* d_A_local, double local_volume, size_t num_rows, size_t dim, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
if ( id < numElements)
{
double uTKu = 0;
double temp[24];
size_t numNodesPerElement = pow(2,dim);
uTKu = 0;
for ( int n = 0; n < num_rows; n++ )
{
temp[n]=0;
for ( int m = 0; m < num_rows; m++)
{
// converts local node to global node
int global_col = ( node_index [ (m / dim) + id*numNodesPerElement ] * dim ) + ( m % dim );
temp[n] += u[global_col] * d_A_local[ n + m*num_rows ];
}
}
for ( int n = 0; n < num_rows; n++ )
{
int global_col = ( node_index [ (n / dim) + id*numNodesPerElement ] * dim ) + ( n % dim );
uTKu += temp[n] * u[global_col];
}
__syncthreads();
uTKu *= 0.5 * pow(chi[id],3);
// reduction
__shared__ double cache[1024];
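// NOTE: this reduction (and its __syncthreads) sits inside the id < numElements
// branch, so the launch is expected to use a power-of-two block size with
// numElements an exact multiple of blockDim.x; otherwise part of cache is read
// uninitialised and the barriers become divergent.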
cache[threadIdx.x] = uTKu;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
}
}
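// Usage sketch (not part of the original code; the d_* names are hypothetical device
// buffers): the accumulator must be zeroed before every launch because the kernel
// only atomically adds each block's partial sum into it, e.g.
//   cudaMemset(d_sum, 0, sizeof(double));
//   calcCompliance<<<numElements/1024, 1024>>>(d_sum, d_u, d_chi, d_node_index,
//                                              d_A_local, local_volume, num_rows, dim, numElements);
// (power-of-two block size, numElements an exact multiple of it, see the note above).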
// computes the measure of non-discreteness (MOD)
__global__
void calcMOD(double* sum, double* chi, double local_volume, size_t numElements)
{
int id = blockDim.x * blockIdx.x + threadIdx.x;
int stride = blockDim.x*gridDim.x;
__shared__ double cache[1024];
cache[threadIdx.x] = 0;
double temp = 0.0;
while(id < numElements)
{
temp += chi[id] * (1-chi[id]) * local_volume * 4 / ( local_volume * numElements );
id += stride;
}
cache[threadIdx.x] = temp;
__syncthreads();
// reduction
unsigned int i = blockDim.x/2;
while(i != 0){
if(threadIdx.x < i){
cache[threadIdx.x] += cache[threadIdx.x + i];
}
__syncthreads();
i /= 2;
}
// reduce sum from all blocks' cache
if(threadIdx.x == 0)
{
#if __CUDA_ARCH__ < 600
atomicAdd_double(sum, cache[0]);
#else
atomicAdd(sum, cache[0]);
#endif
}
} |
c0f9ad311e669ccf42655b6ffa8d40d54e6214f9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C"
__global__ void rowfilter(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Win, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
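// Two parameters are not described above: Win is the width of src (used for
// addressing and for the symmetric reflection), and stride makes the loop visit
// only every stride-th flattened position, writing the result compactly to
// dest[i/stride].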
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use convolution formula: y[n] = sum h[k]*x[n-k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int x_in = x - k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = x_in >= 0 ? ((x_in / Win) % 2)
: 1-(((-x_in-1)/Win) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// (Only the signal is reflected here; the backward kernels additionally
// reflect the filter around the boundary.)
const int res = (x_in % Win + Win) % Win;
x_in = (group == 1) ? (Win-1) - res : res;
const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
value += w[k-Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void rowfilter_bwd(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Win, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use correlation formula: y[n] = sum h[k]*x[n+k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int x_in = x + k;
int k_in = (x_in < 0 || x_in >= Win) ? -k : k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = x_in >= 0 ? ((x_in / Win) % 2)
: 1-(((-x_in-1)/Win) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// On top of reflecting the signal, we also need to reflect the
// filter around the boundary (unlike with the forward pass).
const int res = (x_in % Win + Win) % Win;
x_in = (group == 1) ? (Win-1) - res : res;
const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
value += w[k_in - Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void colfilter(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Hin, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use convolution formula: y[n] = sum h[k]*x[n-k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int y_in = y - k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = y_in >= 0 ? ((y_in / Hin) % 2)
: 1-(((-y_in-1)/Hin) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// (Only the signal is reflected here; the backward kernels additionally
// reflect the filter around the boundary.)
const int res = (y_in % Hin + Hin) % Hin;
y_in = (group == 1) ? (Hin-1) - res : res;
const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
value += w[k-Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void colfilter_bwd(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Hin, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use correlation formula: y[n] = sum h[k]*x[n+k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int y_in = y + k;
int k_in = (y_in < 0 || y_in >= Hin) ? -k : k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = y_in >= 0 ? ((y_in / Hin) % 2)
: 1-(((-y_in-1)/Hin) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// On top of reflecting the signal, we also need to reflect the
// filter around the boundary (unlike with the forward pass).
const int res = (y_in % Hin + Hin) % Hin;
y_in = (group == 1) ? (Hin-1) - res : res;
const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
value += w[k_in - Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
| c0f9ad311e669ccf42655b6ffa8d40d54e6214f9.cu | extern "C"
__global__ void rowfilter(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Win, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use convolution formula: y[n] = sum h[k]*x[n-k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int x_in = x - k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = x_in >= 0 ? ((x_in / Win) % 2)
: 1-(((-x_in-1)/Win) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// (Only the signal is reflected here; the backward kernels additionally
// reflect the filter around the boundary.)
const int res = (x_in % Win + Win) % Win;
x_in = (group == 1) ? (Win-1) - res : res;
const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
value += w[k-Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void rowfilter_bwd(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Win, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use correlation formula: y[n] = sum h[k]*x[n+k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int x_in = x + k;
int k_in = (x_in < 0 || x_in >= Win) ? -k : k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = x_in >= 0 ? ((x_in / Win) % 2)
: 1-(((-x_in-1)/Win) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// On top of reflecting the signal, we also need to reflect the
// filter around the boundary (unlike with the forward pass).
const int res = (x_in % Win + Win) % Win;
x_in = (group == 1) ? (Win-1) - res : res;
const int offset = n*C*H*Win + c*H*Win + y*Win + x_in;
value += w[k_in - Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void colfilter(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Hin, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use convolution formula: y[n] = sum h[k]*x[n-k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int y_in = y - k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = y_in >= 0 ? ((y_in / Hin) % 2)
: 1-(((-y_in-1)/Hin) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// (Only the signal is reflected here; the backward kernels additionally
// reflect the filter around the boundary.)
const int res = (y_in % Hin + Hin) % Hin;
y_in = (group == 1) ? (Hin-1) - res : res;
const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
value += w[k-Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
extern "C"
__global__ void colfilter_bwd(
float* dest, const float* src, const float *w, int N, int C, int H, int W,
int Hin, int Mlow, int Mhigh, int stride) {
/* dest - output array. should be same shape as input
src - input array
w - input kernel. Should be a 1d array
N, C, H, W - input tensor sizes
Mlow - idx of most negative filter tap
Mhigh - idx of most positive filter tap
rev - used for calculating gradients - need to do correlation, and
some funny things with the filter.
*/
for (int i = stride * (blockIdx.x * blockDim.x + threadIdx.x);
i < N*C*H*W; i += stride * (blockDim.x * gridDim.x)) {
const int n = i / C / H / W;
const int c = (i / H / W) % C;
const int y = (i / W) % H;
const int x = i % W;
float value = 0;
// Use correlation formula: y[n] = sum h[k]*x[n+k]
#pragma unroll
for (int k = Mlow; k <= Mhigh; k++) {
int y_in = y + k;
int k_in = (y_in < 0 || y_in >= Hin) ? -k : k;
// handle padding - the above complicated equation
// simply makes sure that the correct index input is used
// for symmetric padding. I.e. it should result in x_in going from:
// -3 -2 -1 | 0 1 2 3 4 5 6 | 7 8 9
// to:
// 2 1 0 | 0 1 2 3 4 5 6 | 6 5 4
// It also allows padding by more than the input length.
// The group variable will be:
// 1 1 1 | 0 0 0 0 0 0 0 | 1 1 1 1 1 1 | 0 0 0 ...
const int group = y_in >= 0 ? ((y_in / Hin) % 2)
: 1-(((-y_in-1)/Hin) % 2);
// This does modulo operation but allowing for negative numbers
// i.e. we want -2 % 5 = 3. In python this works but in C it
// gives -2.
// On top of reflecting the signal, we also need to reflect the
// filter around the boundary (unlike with the forward pass).
const int res = (y_in % Hin + Hin) % Hin;
y_in = (group == 1) ? (Hin-1) - res : res;
const int offset = n*C*Hin*W + c*Hin*W + y_in*W + x;
value += w[k_in - Mlow] * src[offset];
}
dest[i/stride] = value;
}
}
|
122623e1ff8a178915deee908d592c295676b79e.hip | // !!! This is a file automatically generated by hipify!!!
// incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <math.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void incrementArrayOnHost(float *a, unsigned long N)
{
unsigned long i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, unsigned long N)
{
unsigned long idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned long idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned long id = idy*gridDim.x*blockDim.x + idx;
if (id<N) a[id] = a[id]+1.f;
}
int main(int argc, char **argv)
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
unsigned long i, N = strtoul(argv[1], NULL, 10);
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
gpuErrchk( hipMalloc((void **) &a_d, size) );
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
gpuErrchk( hipMemcpy(a_d, a_h, sizeof(float)*N, hipMemcpyHostToDevice) );
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
unsigned long blockDimX = strtoul(argv[2], NULL, 10);
unsigned long blockDimY = strtoul(argv[3], NULL, 10);
unsigned long gridDimX = strtoul(argv[4], NULL, 10);
unsigned long gridDimY = strtoul(argv[5], NULL, 10);
dim3 blockSize = dim3(blockDimX, blockDimY);
dim3 gridSize = dim3(gridDimX, gridDimY);
printf("blockDim: (%lu,%lu), gridDim: (%lu,%lu)\n", blockDimX, blockDimY, gridDimX, gridDimY);
// Part 2 of 2. Call incrementArrayOnDevice kernel
hipLaunchKernelGGL(( incrementArrayOnDevice) , dim3(gridSize), dim3(blockSize) , 0, 0, a_d, N);
// Retrieve result from device and store in b_h
gpuErrchk( hipMemcpy(b_h, a_d, sizeof(float)*N, hipMemcpyDeviceToHost) );
// hipError_t err = hipGetLastError();
// if ( hipSuccess != err ) printf("cudaCheckError() failed: %s\n", hipGetErrorString(err));
hipDeviceSynchronize();
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); hipFree(a_d);
}
| 122623e1ff8a178915deee908d592c295676b79e.cu | // incrementArray.cu
#include <stdio.h>
#include <assert.h>
#include <cuda.h>
#include <math.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
void incrementArrayOnHost(float *a, unsigned long N)
{
unsigned long i;
for (i=0; i < N; i++) a[i] = a[i]+1.f;
}
__global__ void incrementArrayOnDevice(float *a, unsigned long N)
{
unsigned long idx = blockIdx.x*blockDim.x + threadIdx.x;
unsigned long idy = blockIdx.y*blockDim.y + threadIdx.y;
unsigned long id = idy*gridDim.x*blockDim.x + idx;
if (id<N) a[id] = a[id]+1.f;
}
int main(int argc, char **argv)
{
float *a_h, *b_h; // pointers to host memory
float *a_d; // pointer to device memory
unsigned long i, N = strtoul(argv[1], NULL, 10);
size_t size = N*sizeof(float);
// allocate arrays on host
a_h = (float *)malloc(size);
b_h = (float *)malloc(size);
// allocate array on device
gpuErrchk( cudaMalloc((void **) &a_d, size) );
// initialization of host data
for (i=0; i<N; i++) a_h[i] = (float)i;
// copy data from host to device
gpuErrchk( cudaMemcpy(a_d, a_h, sizeof(float)*N, cudaMemcpyHostToDevice) );
// do calculation on host
incrementArrayOnHost(a_h, N);
// do calculation on device:
// Part 1 of 2. Compute execution configuration
unsigned long blockDimX = strtoul(argv[2], NULL, 10);
unsigned long blockDimY = strtoul(argv[3], NULL, 10);
unsigned long gridDimX = strtoul(argv[4], NULL, 10);
unsigned long gridDimY = strtoul(argv[5], NULL, 10);
dim3 blockSize = dim3(blockDimX, blockDimY);
dim3 gridSize = dim3(gridDimX, gridDimY);
printf("blockDim: (%lu,%lu), gridDim: (%lu,%lu)\n", blockDimX, blockDimY, gridDimX, gridDimY);
// Part 2 of 2. Call incrementArrayOnDevice kernel
incrementArrayOnDevice <<< gridSize, blockSize >>> (a_d, N);
// Retrieve result from device and store in b_h
gpuErrchk( cudaMemcpy(b_h, a_d, sizeof(float)*N, cudaMemcpyDeviceToHost) );
// cudaError err = cudaGetLastError();
// if ( cudaSuccess != err ) printf("cudaCheckError() failed: %s\n", cudaGetErrorString(err));
cudaDeviceSynchronize();
// check results
for (i=0; i<N; i++) assert(a_h[i] == b_h[i]);
// cleanup
free(a_h); free(b_h); cudaFree(a_d);
}
|
976df260ebb6f6b4aac0d72db7547d67f03eac81.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stdio.h"
#include "stdlib.h"
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
void array_print(float* array, int size) {
for (int i = 0; i < size; i++) {
printf("%f ", *(array+i));
}
printf("\n");
}
int main()
{
char** bufferArray = (char**)malloc(sizeof(char*)*20);
char A[] = "aaa";
char B[] = "bbb";
char C[] = "ccc";
char D[] = "ddd";
char E[] = "eee";
*(bufferArray+0) = A;
*(bufferArray+1) = B;
*(bufferArray+2) = C;
*(bufferArray+3) = D;
*(bufferArray+4) = E;
for (int i = 0; i < 4; ++i) {
printf("buff array : %s\n", *(bufferArray+i));
}
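// The remainder of main() uses h_A, h_B, h_C, N and size without declaring them;
// the definitions below are an assumed minimal completion (3-element vectors,
// matching the array_print(..., 3) calls at the end of main).
int N = 3;
size_t size = N * sizeof(float);
float h_A[] = {1.f, 2.f, 3.f};
float h_B[] = {4.f, 5.f, 6.f};
float h_C[3];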
char** d_bufferArray;
hipMalloc(&d_bufferArray, sizeof(char*) * 20); // size assumed to mirror the host malloc above; the original call was left incomplete
float* d_A;
hipMalloc(&d_A, size);
float* d_B;
hipMalloc(&d_B, size);
float* d_C;
hipMalloc(&d_C, size);
hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice);
int threadPerBlock = 1024;
int blockPerGrid = (N + threadPerBlock - 1) / threadPerBlock;
hipLaunchKernelGGL(( VecAdd), dim3(blockPerGrid), dim3(threadPerBlock), 0, 0, d_A,d_B,d_C, N);
hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost);
// Free device memory
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
array_print(h_A,3);
array_print(h_B,3);
array_print(h_C,3);
} | 976df260ebb6f6b4aac0d72db7547d67f03eac81.cu | #include "stdio.h"
#include "stdlib.h"
__global__ void VecAdd(float* A, float* B, float* C, int N)
{
int i = blockDim.x * blockIdx.x + threadIdx.x;
if (i < N)
C[i] = A[i] + B[i];
}
void array_print(float* array, int size) {
for (int i = 0; i < size; i++) {
printf("%f ", *(array+i));
}
printf("\n");
}
int main()
{
char** bufferArray = (char**)malloc(sizeof(char*)*20);
char A[] = "aaa";
char B[] = "bbb";
char C[] = "ccc";
char D[] = "ddd";
char E[] = "eee";
*(bufferArray+0) = A;
*(bufferArray+1) = B;
*(bufferArray+2) = C;
*(bufferArray+3) = D;
*(bufferArray+4) = E;
for (int i = 0; i < 4; ++i) {
printf("buff array : %s\n", *(bufferArray+i));
}
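// The remainder of main() uses h_A, h_B, h_C, N and size without declaring them;
// the definitions below are an assumed minimal completion (3-element vectors,
// matching the array_print(..., 3) calls at the end of main).
int N = 3;
size_t size = N * sizeof(float);
float h_A[] = {1.f, 2.f, 3.f};
float h_B[] = {4.f, 5.f, 6.f};
float h_C[3];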
char** d_bufferArray;
cudaMalloc(&d_bufferArray, sizeof(char*) * 20); // size assumed to mirror the host malloc above; the original call was left incomplete
float* d_A;
cudaMalloc(&d_A, size);
float* d_B;
cudaMalloc(&d_B, size);
float* d_C;
cudaMalloc(&d_C, size);
cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice);
int threadPerBlock = 1024;
int blockPerGrid = (N + threadPerBlock - 1) / threadPerBlock;
VecAdd<<<blockPerGrid, threadPerBlock>>>(d_A,d_B,d_C, N);
cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost);
// Free device memory
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
array_print(h_A,3);
array_print(h_B,3);
array_print(h_C,3);
} |
55194ca61f6b380eb79d4b02de4cd0df57ac5253.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_T_N_32_32_8_8_8(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int ibx = blockIdx.x * 32;
const int iby = blockIdx.y * 32;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
int idt = tx*8 + ty;
if ( ty >= k )
A += __mul24(ibx, lda) + 0;
else
A += __mul24(ibx, lda) + ty;
if ( (ibx + tx) >= m )
A += __mul24(0, lda);
else
A += __mul24(tx, lda);
if ( (iby+tx) >= n )
B += __mul24(iby+0, ldb);
else
B += __mul24(iby+tx, ldb);
if ( ty >= k )
B += 0;
else
B += ty;
C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc );
lda = lda * 8;
ldb = ldb * 8;
int as1=0, as2=lda, as3=2*lda, as4=3*lda;
int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb;
switch(k) {
case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break;
case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break;
case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break;
}
if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; }
if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; }
double b = B[bs1];
double b1 = B[bs2];
double b2 = B[bs3];
double b3 = B[bs4];
double Ap[4] = { A[as1], A[as2], A[as3], A[as4] };
const double *Bend = B + (k - k % 8);
B += 8;
A += 8;
__shared__ double Bb[8][33];
__shared__ double ABb[32][9];
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int l = 17*(idt/32);
int idt1 = idt;
idt = idt % 32;
if ( k > 15 ) {
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
daxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
daxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
daxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
daxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
daxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
daxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
daxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
daxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if ( k > 7 ) {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k - k % 8;
if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; }
if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; }
as1=0;
daxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
daxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
daxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
daxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
daxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
daxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
daxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
daxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
}
k = k % 8;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( ABb[idt][i], &Bb[i][l], Cb );
}
}
if ( (iby+16*(idt1/32+1)) >= n ) {
lda = n - iby - 16*(idt1/32);
}
else {
lda = 16;
}
if ( (ibx+idt) >= m )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_T_N_32_32_8_8_8(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 8, 8 );
dim3 grid( (m - 1)/32 + 1, (n - 1)/32 + 1 );
hipLaunchKernelGGL(( dgemm_kernel_T_N_32_32_8_8_8), dim3(grid), dim3(threads), 0, magma_stream ,
C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
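// Usage sketch (not part of the original source): with MAGMA's column-major storage
// this computes C = alpha * A^T * B + beta * C, so A is k-by-m (lda >= k), B is
// k-by-n (ldb >= k) and C is m-by-n (ldc >= m), e.g.
//   magmablas_dgemm_T_N_32_32_8_8_8( dC, dA, dB, m, n, k, ldda, lddb, lddc, 1.0, 0.0 );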
| 55194ca61f6b380eb79d4b02de4cd0df57ac5253.cu | /*
-- MAGMA (version 1.5.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2014
@precisions normal d -> s
*/
#include "common_magma.h"
#include "commonblas_d.h"
/*
* daxpy computes c += alpha*b, where b and c are 16-element vectors.
*/
static __device__ void daxpy(
double alpha,
const double* __restrict__ b,
double* __restrict__ c )
{
c[0] += alpha * b[0];
c[1] += alpha * b[1];
c[2] += alpha * b[2];
c[3] += alpha * b[3];
c[4] += alpha * b[4];
c[5] += alpha * b[5];
c[6] += alpha * b[6];
c[7] += alpha * b[7];
c[8] += alpha * b[8];
c[9] += alpha * b[9];
c[10] += alpha * b[10];
c[11] += alpha * b[11];
c[12] += alpha * b[12];
c[13] += alpha * b[13];
c[14] += alpha * b[14];
c[15] += alpha * b[15];
}
/**
Purpose:
--------
This routine computes
C = alpha * A^T*B + beta * C
B is put into shared memory
Parameters Used:
blk_M=32 blk_N=32 blk_K=8 nthd_x=8 nthd_y=8
This code should run for any matrix size.
@ingroup magma_dblas3
********************************************************************/
__global__ void
dgemm_kernel_T_N_32_32_8_8_8(
double* __restrict__ C,
const double* __restrict__ A,
const double* __restrict__ B,
int m, int n, int k,
int lda, int ldb, int ldc,
double alpha, double beta )
{
const int ibx = blockIdx.x * 32;
const int iby = blockIdx.y * 32;
const int tx = threadIdx.y;
const int ty = threadIdx.x;
int idt = tx*8 + ty;
if ( ty >= k )
A += __mul24(ibx, lda) + 0;
else
A += __mul24(ibx, lda) + ty;
if ( (ibx + tx) >= m )
A += __mul24(0, lda);
else
A += __mul24(tx, lda);
if ( (iby+tx) >= n )
B += __mul24(iby+0, ldb);
else
B += __mul24(iby+tx, ldb);
if ( ty >= k )
B += 0;
else
B += ty;
C += ibx + idt % 32 + __mul24( iby + 16*(idt/32), ldc );
lda = lda * 8;
ldb = ldb * 8;
int as1=0, as2=lda, as3=2*lda, as4=3*lda;
int bs1=0, bs2=ldb, bs3=2*ldb, bs4=3*ldb;
switch(k) {
case 1: as2=0; as3=0*lda; as4=0; bs2=0; bs3=0*ldb; bs4=0; break;
case 2: as2=lda; as3=0*lda; as4=0; bs2=ldb; bs3=0*ldb; bs4=0; break;
case 3: as2=lda; as3=2*lda; as4=0; bs2=ldb; bs3=2*ldb; bs4=0; break;
}
if ( (ibx + tx ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 8 ) >= m ) { as1=0; as2=0*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 16) >= m ) { as1=0; as2=1*lda; as3=0*lda; as4=0*lda; } else
if ( (ibx + tx + 24) >= m ) { as1=0; as2=1*lda; as3=2*lda; as4=0*lda; }
if ( (iby + tx ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 8 ) >= n ) { bs1=0; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 16) >= n ) { bs1=0; bs2=1*ldb; bs3=0*ldb; bs4=0*ldb; } else
if ( (iby + tx + 24) >= n ) { bs1=0; bs2=1*ldb; bs3=2*ldb; bs4=0*ldb; }
double b = B[bs1];
double b1 = B[bs2];
double b2 = B[bs3];
double b3 = B[bs4];
double Ap[4] = { A[as1], A[as2], A[as3], A[as4] };
const double *Bend = B + (k - k % 8);
B += 8;
A += 8;
__shared__ double Bb[8][33];
__shared__ double ABb[32][9];
double Cb[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
const int l = 17*(idt/32);
int idt1 = idt;
idt = idt % 32;
if ( k > 15 ) {
do {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
daxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
daxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
daxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
daxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
daxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
daxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
daxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
daxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
B += 8;
A += 8;
__syncthreads();
} while (B < Bend);
}
if ( k > 7 ) {
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
as1 = k - k % 8;
if ( as1+ty >= k ) { bs1=0*ldb; bs2=0*ldb; bs3=0*ldb; bs4=0*ldb; B -= 8; }
if ( as1+ty >= k ) { as1=0*lda; as2=0*lda; as3=0*lda; as4=0*lda; A -= 8; }
as1=0;
daxpy( ABb[idt][0], &Bb[0][l], Cb ); Ap[0]=A[as1];
daxpy( ABb[idt][1], &Bb[1][l], Cb ); Ap[1]=A[as2];
daxpy( ABb[idt][2], &Bb[2][l], Cb ); Ap[2]=A[as3];
daxpy( ABb[idt][3], &Bb[3][l], Cb ); Ap[3]=A[as4];
daxpy( ABb[idt][4], &Bb[4][l], Cb ); b=B[bs1];
daxpy( ABb[idt][5], &Bb[5][l], Cb ); b1=B[bs2];
daxpy( ABb[idt][6], &Bb[6][l], Cb ); b2=B[bs3];
daxpy( ABb[idt][7], &Bb[7][l], Cb ); b3=B[bs4];
}
k = k % 8;
if ( k != 0 ) {
__syncthreads();
Bb[ty][tx ] = b;
Bb[ty][tx+8 ] = b1;
Bb[ty][tx+17] = b2;
Bb[ty][tx+25] = b3;
ABb[tx ][ty] = Ap[0];
ABb[tx+8 ][ty] = Ap[1];
ABb[tx+16][ty] = Ap[2];
ABb[tx+24][ty] = Ap[3];
__syncthreads();
for(int i=0; i < k; i++) {
daxpy( ABb[idt][i], &Bb[i][l], Cb );
}
}
if ( (iby+16*(idt1/32+1)) >= n ) {
lda = n - iby - 16*(idt1/32);
}
else {
lda = 16;
}
if ( (ibx+idt) >= m )
lda = 0;
else
lda = lda;
switch(lda) {
case 16:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
C[15*ldc] = alpha * Cb[15] + beta * C[15*ldc];
break;
case 15:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
C[14*ldc] = alpha * Cb[14] + beta * C[14*ldc];
break;
case 14:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
C[13*ldc] = alpha * Cb[13] + beta * C[13*ldc];
break;
case 13:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
C[12*ldc] = alpha * Cb[12] + beta * C[12*ldc];
break;
case 12:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
C[11*ldc] = alpha * Cb[11] + beta * C[11*ldc];
break;
case 11:
C[ 0 ] = alpha * Cb[0] + beta * C[ 0 ];
C[ 1*ldc] = alpha * Cb[1] + beta * C[ 1*ldc];
C[ 2*ldc] = alpha * Cb[2] + beta * C[ 2*ldc];
C[ 3*ldc] = alpha * Cb[3] + beta * C[ 3*ldc];
C[ 4*ldc] = alpha * Cb[4] + beta * C[ 4*ldc];
C[ 5*ldc] = alpha * Cb[5] + beta * C[ 5*ldc];
C[ 6*ldc] = alpha * Cb[6] + beta * C[ 6*ldc];
C[ 7*ldc] = alpha * Cb[7] + beta * C[ 7*ldc];
C[ 8*ldc] = alpha * Cb[8] + beta * C[ 8*ldc];
C[ 9*ldc] = alpha * Cb[9] + beta * C[ 9*ldc];
C[10*ldc] = alpha * Cb[10] + beta * C[10*ldc];
break;
case 10:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
C[9*ldc] = alpha * Cb[9] + beta * C[9*ldc];
break;
case 9:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
C[8*ldc] = alpha * Cb[8] + beta * C[8*ldc];
break;
case 8:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
C[7*ldc] = alpha * Cb[7] + beta * C[7*ldc];
break;
case 7:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
C[6*ldc] = alpha * Cb[6] + beta * C[6*ldc];
break;
case 6:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
C[5*ldc] = alpha * Cb[5] + beta * C[5*ldc];
break;
case 5:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
C[4*ldc] = alpha * Cb[4] + beta * C[4*ldc];
break;
case 4:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
C[3*ldc] = alpha * Cb[3] + beta * C[3*ldc];
break;
case 3:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
C[2*ldc] = alpha * Cb[2] + beta * C[2*ldc];
break;
case 2:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
C[1*ldc] = alpha * Cb[1] + beta * C[1*ldc];
break;
case 1:
C[0 ] = alpha * Cb[0] + beta * C[0 ];
break;
case 0:
break;
}
}
extern "C" void
magmablas_dgemm_T_N_32_32_8_8_8(
double *C, const double *A, const double *B,
magma_int_t m, magma_int_t n, magma_int_t k,
magma_int_t lda, magma_int_t ldb, magma_int_t ldc,
double alpha, double beta )
{
dim3 threads( 8, 8 );
dim3 grid( (m - 1)/32 + 1, (n - 1)/32 + 1 );
dgemm_kernel_T_N_32_32_8_8_8<<< grid, threads, 0, magma_stream >>>
( C, A, B, m, n, k, lda, ldb, ldc, alpha, beta );
}
|
477d7bf511971150944937d0becc649b147dcb9d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <algorithm>
#include <stdint.h>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
#ifdef SSD_STABLE_TOPK
#include <hipcub/hipcub.hpp>
#endif
namespace nvinfer1
{
namespace plugin
{
namespace {
__device__ __inline__ void swap(float &a, float &b)
{
float temp = a;
a = b;
b = temp;
}
} // namespace anonymous
template <typename T_BBOX>
__device__ T_BBOX bboxSizeOpt(
const Bbox<T_BBOX>& bbox,
const bool normalized)
{
if (normalized) {
// If any bbox dimension is negative the result will be zero.
T_BBOX width = fmaxf(bbox.xmax - bbox.xmin, 0.0f);
T_BBOX height = fmaxf(bbox.ymax - bbox.ymin, 0.0f);
return width * height;
} else {
T_BBOX width = bbox.xmax - bbox.xmin;
T_BBOX height = bbox.ymax - bbox.ymin;
if (width < 0 || height < 0) {
return 0.0f;
}
return (width + 1.0f) * (height + 1.0f);
}
}
template <typename T_BBOX>
__device__ void intersectBboxOpt(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
Bbox<T_BBOX>* intersect_bbox)
{
intersect_bbox->xmin = max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = min(bbox1.ymax, bbox2.ymax);
}
template <typename T_BBOX>
__device__ float jaccardOverlapOpt(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
const bool normalized)
{
Bbox<T_BBOX> intersect_bbox;
intersectBboxOpt(bbox1, bbox2, &intersect_bbox);
float intersect_size = bboxSizeOpt(intersect_bbox, normalized);
float bbox1_size = bboxSizeOpt(bbox1, normalized);
float bbox2_size = bboxSizeOpt(bbox2, normalized);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
}
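// Worked example (normalized coordinates): two unit squares offset by 0.5 in x
// intersect in a 0.5 x 1 strip, so the overlap is 0.5 / (1 + 1 - 0.5) = 1/3.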
template <typename T_BBOX>
__device__ void emptyBboxInfoOpt(
BboxInfo<T_BBOX>* bbox_info)
{
bbox_info->conf_score = T_BBOX(0);
bbox_info->label = -2; // -1 is used for all labels when shared_location is true
bbox_info->bbox_idx = -1;
bbox_info->kept = false;
}
/********** new NMS for only score and index array **********/
#ifdef SSD_STABLE_TOPK
struct BlockPrefixCallbackOp
{
// Running prefix
int running_total;
// Constructor
__device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ int operator()(int block_aggregate)
{
int old_prefix = running_total;
running_total += block_aggregate;
return old_prefix;
}
};
#endif
template <typename T_SCORE, typename T_BBOX, int TSIZE, bool isNormalized, int BLOCK_THREADS>
__global__ void allClassNMSOpt_kernel(
const int num_no_use,
const int num_classes,
const int num_preds_per_class,
const int top_k_,
const float nms_threshold,
const bool share_location,
const bool isNormalized_unused,
T_BBOX* bbox_data, // bbox_data should be float to preserve location information
T_SCORE* beforeNMS_scores,
int* beforeNMS_index_array,
T_SCORE* afterNMS_scores,
int* afterNMS_index_array,
int* active_count, // number of active elemements per class/batch
int* active_count_per_batch,
bool flipXY = false)
{
const int num_smem_elements = TSIZE * blockDim.x;
// number of active elements for the current batch_class combi
__shared__ int result_active_count;
#ifdef SSD_STABLE_TOPK
// Specialize BlockScan type for our thread block
typedef hipcub::BlockScan<int, BLOCK_THREADS> BlockScan;
__shared__ union {
typename BlockScan::TempStorage scan;
char smem_buf[BLOCK_THREADS * TSIZE * (sizeof(Bbox<T_BBOX>) + sizeof(bool))];
} temp_storage;
Bbox<T_BBOX> *sh_bbox = reinterpret_cast<Bbox<T_BBOX>*>(temp_storage.smem_buf);
#else
// keep a small smem cache for the bboxes. Alignment is guaranteed due to the order of the definitions.
extern __shared__ int4 dynamic_smem[];
Bbox<T_BBOX> *sh_bbox = reinterpret_cast<Bbox<T_BBOX>*>(dynamic_smem);
#endif
bool *kept_bboxinfo_flag = reinterpret_cast<bool*>(sh_bbox + num_smem_elements);
int active = active_count[blockIdx.y * gridDim.x + blockIdx.x];
int top_k = (active < top_k_) ? active : top_k_;
int class_id = blockIdx.x;
int batch_id = blockIdx.y;
// Each thread touches only a certain subset of all bboxinfos. Keep the kept_bboxinfo_flag for the thread in a bitmask.
uint32_t thread_kept_bboxinfo_flag = 0;
const int offset = batch_id * num_classes * num_preds_per_class + class_id * num_preds_per_class;
// local thread data
// TODO loc_bboxIndex is only required during the bbox initialization phase. don't waste registers for it...
int loc_bboxIndex[TSIZE];
Bbox<T_BBOX> loc_bbox[TSIZE];
if (active)
{
// we do not have to synchronize after writing active_count_per_batch.
// T_SIZE is > 0, so there'll be at least one syncthreads before the first usage of this variable.
if (threadIdx.x == 0) {
result_active_count = 0;
}
const int max_idx = offset + top_k; // put top_k bboxes into NMS calculation
const int bbox_idx_offset = share_location ? (batch_id * num_preds_per_class) : (batch_id * num_classes * num_preds_per_class);
// {{{ initialize Bbox, Bboxinfo, kept_bboxinfo_flag
#pragma unroll
for (int t = 0; t < TSIZE; t++)
{
bool thread_kept_bboxinfo = false;
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
if (item_idx < max_idx)
{
loc_bboxIndex[t] = beforeNMS_index_array[item_idx];
if (loc_bboxIndex[t] >= 0)
{
const int bbox_data_idx = share_location ? (loc_bboxIndex[t] % num_preds_per_class + bbox_idx_offset) : loc_bboxIndex[t];
loc_bbox[t] = ((Bbox<T_BBOX>*)bbox_data)[bbox_data_idx];
if (flipXY) {
swap(loc_bbox[t].xmin, loc_bbox[t].ymin);
swap(loc_bbox[t].xmax, loc_bbox[t].ymax);
}
sh_bbox[cur_idx] = loc_bbox[t];
thread_kept_bboxinfo = true;
thread_kept_bboxinfo_flag |= (1 << t);
}
}
kept_bboxinfo_flag[cur_idx] = thread_kept_bboxinfo;
}
// }}}
__syncthreads();
// TODO we can use loc_bboxIndex[t] == -1 to find the maximum index which is -1 and set max_idx to this value. This would reduce
        // the number of iterations for all threads if there are fewer than top-k bboxes available. How likely is this?
// {{{ filter out overlapped boxes with lower scores
{
const int offset = 0;
const int max_idx = top_k;
int ref_item_idx = 0;
while (ref_item_idx < max_idx)
{
Bbox<T_BBOX> ref_bbox;
//*((int4*)&ref_bbox) = *((int4*)&sh_bbox[ref_item_idx - offset]);
ref_bbox = sh_bbox[ref_item_idx];
//uint32_t enabled = ~1;
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
if ((item_idx > ref_item_idx) && (thread_kept_bboxinfo_flag & (1 << t)))
{
if (jaccardOverlapOpt(ref_bbox, loc_bbox[t], isNormalized) > nms_threshold)
{
thread_kept_bboxinfo_flag &= ~(1 << t);
kept_bboxinfo_flag[cur_idx] = false;
}
}
}
__syncthreads();
do
{
ref_item_idx++;
} while (ref_item_idx < max_idx && !kept_bboxinfo_flag[ref_item_idx - offset]);
}
}
// }}}
#ifdef SSD_STABLE_TOPK
// need this, since we have union
__syncthreads();
#endif
// {{{ store data
// Ideally we'd compact the data for the next stage to reduce work on the next stage.
// As long as there's no TopK algorithm with a dynamic number of elements for the input
// it doesn't make sense yet to do the compact step.
// first determine the total amount of active elements after the NMS step
int thread_active = __popc(thread_kept_bboxinfo_flag);
#ifdef SSD_STABLE_TOPK
int write_offset;
int aggregate;
BlockScan(temp_storage.scan).ExclusiveSum(thread_active, write_offset, aggregate);
if (threadIdx.x == 0) {
atomicAdd(&active_count_per_batch[batch_id], aggregate);
active_count[blockIdx.y * gridDim.x + blockIdx.x] = aggregate;
}
int write_item_idx = (batch_id * num_classes * top_k_) + blockIdx.x * top_k_ + write_offset;
#endif
if (thread_active) {
#ifndef SSD_STABLE_TOPK
int write_offset = atomicAdd(&active_count_per_batch[batch_id], thread_active);
int write_item_idx = (batch_id * num_classes * top_k_) + write_offset;
#endif
for (int t = 0; t < TSIZE; t++) {
const int cur_idx = threadIdx.x + blockDim.x * t;
const int read_item_idx = offset + cur_idx;
bool is_valid_bbox = (thread_kept_bboxinfo_flag & (1 << t));
if (is_valid_bbox) {
afterNMS_scores[write_item_idx] = beforeNMS_scores[read_item_idx];
afterNMS_index_array[write_item_idx] = loc_bboxIndex[t];
++write_item_idx;
}
}
}
// }}}
}
}
template <typename T_SCORE, typename T_BBOX>
ssdStatus_t allClassNMSOpt_gpu(
hipStream_t stream,
const int num,
const int num_classes,
const int num_preds_per_class,
const int top_k,
const float nms_threshold,
const bool share_location,
const bool isNormalized,
void* bbox_data,
void* beforeNMS_scores,
void* beforeNMS_index_array,
void* afterNMS_scores,
void* afterNMS_index_array,
void* active_count,
void* active_count_per_batch,
bool flipXY = false)
{
const int BLOCK_THREADS = 256;
#define NMS_P(tsize) allClassNMSOpt_kernel<T_SCORE, T_BBOX, (tsize), true, BLOCK_THREADS>
#define NMS_P_U(tsize) allClassNMSOpt_kernel<T_SCORE, T_BBOX, (tsize), false, BLOCK_THREADS>
void (*kernel[2][8])(const int, const int, const int, const int, const float,
const bool, const bool, float*, T_SCORE*, int*, T_SCORE*, int*,
int*, int*, bool)
= {
{NMS_P_U(1), NMS_P_U(2), NMS_P_U(3), NMS_P_U(4), NMS_P_U(5), NMS_P_U(6), NMS_P_U(7), NMS_P_U(8),},
{NMS_P(1), NMS_P(2), NMS_P(3), NMS_P(4), NMS_P(5), NMS_P(6), NMS_P(7), NMS_P(8),}
};
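    // The table above is indexed as kernel[isNormalized][t_size - 1]: the first index selects the
    // normalized/unnormalized bbox-size path at compile time, the second the number of boxes handled
    // per thread (TSIZE), which is derived from top_k and the block size below.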
// round up #threads to the minimum cta size possible which holds 1 bbox per thread
// TODO 1024 is the #threads per CTA limit and should be queried from the GPU.
// With top_k > max #threads per CTA this heuristic gets inefficient and should be enhanced
// to reduce the number of idle threads.
#ifdef SSD_STABLE_TOPK
const int t_size = (top_k + BLOCK_THREADS - 1) / BLOCK_THREADS;
dim3 block(BLOCK_THREADS);
const int smem_size = 0;
#else
const int BS = ::min(((top_k + 31) / 32) * 32, 1024);
const int t_size = (top_k + BS - 1) / BS;
dim3 block(BS);
// compute smem size for bbox cache and kept boxes
const int smem_size = BS * t_size * (sizeof(bool) + sizeof(Bbox<T_BBOX>));
#endif
const dim3 GS(num_classes, num);
    assert(t_size <= 8);
    hipLaunchKernelGGL((kernel[isNormalized][t_size - 1]), dim3(GS), dim3(block), smem_size, stream,
                       num, num_classes, num_preds_per_class,
                       top_k, nms_threshold, share_location, isNormalized,
                       (T_BBOX*) bbox_data,
                       (T_SCORE*) beforeNMS_scores,
                       (int*) beforeNMS_index_array,
                       (T_SCORE*) afterNMS_scores,
                       (int*) afterNMS_index_array,
                       (int*) active_count,
                       (int*) active_count_per_batch,
                       flipXY);
CSC(hipGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// allClassNMSOpt LAUNCH CONFIG {{{
typedef ssdStatus_t (*nmsOptFunc)(hipStream_t,
const int,
const int,
const int,
const int,
const float,
const bool,
const bool,
void*,
void*,
void*,
void*,
void*,
void*, // activeCount
void*, // activeCountPerClass
bool);
struct nmsOptLaunchConfigSSD
{
DType_t t_score;
DType_t t_bbox;
nmsOptFunc function;
nmsOptLaunchConfigSSD(DType_t t_score, DType_t t_bbox)
: t_score(t_score)
, t_bbox(t_bbox)
{
}
nmsOptLaunchConfigSSD(DType_t t_score, DType_t t_bbox, nmsOptFunc function)
: t_score(t_score)
, t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const nmsOptLaunchConfigSSD& other)
{
return t_score == other.t_score && t_bbox == other.t_bbox;
}
};
static std::vector<nmsOptLaunchConfigSSD> nmsOptFuncVec;
bool nmsOptInit()
{
nmsOptFuncVec.push_back(nmsOptLaunchConfigSSD(DataType::kFLOAT, DataType::kFLOAT,
allClassNMSOpt_gpu<float, float>));
return true;
}
static bool initialized = nmsOptInit();
//}}}
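// Note: nmsOptInit() above only registers the <float, float> score/bbox combination, so
// allClassNMSOpt() below returns STATUS_BAD_PARAM for any other (DT_SCORE, DT_BBOX) pair.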
ssdStatus_t allClassNMSOpt(hipStream_t stream,
const int num,
const int num_classes,
const int num_preds_per_class,
const int top_k,
const float nms_threshold,
const bool share_location,
const bool isNormalized,
const DType_t DT_SCORE,
const DType_t DT_BBOX,
void* bbox_data,
void* beforeNMS_scores,
void* beforeNMS_index_array,
void* afterNMS_scores,
void* afterNMS_index_array,
void* active_count,
void* active_count_per_batch,
bool flipXY)
{
nmsOptLaunchConfigSSD lc = nmsOptLaunchConfigSSD(DT_SCORE, DT_BBOX, allClassNMSOpt_gpu<float, float>);
for (unsigned i = 0; i < nmsOptFuncVec.size(); ++i)
{
if (lc == nmsOptFuncVec[i])
{
DEBUG_PRINTF("all class nms kernel %d\n", i);
return nmsOptFuncVec[i].function(stream,
num,
num_classes,
num_preds_per_class,
top_k,
nms_threshold,
share_location,
isNormalized,
bbox_data,
beforeNMS_scores,
beforeNMS_index_array,
afterNMS_scores,
afterNMS_index_array,
active_count,
active_count_per_batch,
flipXY);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
| 477d7bf511971150944937d0becc649b147dcb9d.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <vector>
#include <algorithm>
#include <stdint.h>
#include "ssdOpt.h"
#include "ssdOptMacros.h"
#ifdef SSD_STABLE_TOPK
#include <cub/cub.cuh>
#endif
namespace nvinfer1
{
namespace plugin
{
namespace {
__device__ __inline__ void swap(float &a, float &b)
{
float temp = a;
    a = b;
b = temp;
}
} // namespace anonymous
template <typename T_BBOX>
__device__ T_BBOX bboxSizeOpt(
const Bbox<T_BBOX>& bbox,
const bool normalized)
{
if (normalized) {
// If any bbox dimension is negative the result will be zero.
T_BBOX width = fmaxf(bbox.xmax - bbox.xmin, 0.0f);
T_BBOX height = fmaxf(bbox.ymax - bbox.ymin, 0.0f);
return width * height;
} else {
T_BBOX width = bbox.xmax - bbox.xmin;
T_BBOX height = bbox.ymax - bbox.ymin;
if (width < 0 || height < 0) {
return 0.0f;
}
return (width + 1.0f) * (height + 1.0f);
}
}
template <typename T_BBOX>
__device__ void intersectBboxOpt(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
Bbox<T_BBOX>* intersect_bbox)
{
intersect_bbox->xmin = max(bbox1.xmin, bbox2.xmin);
intersect_bbox->ymin = max(bbox1.ymin, bbox2.ymin);
intersect_bbox->xmax = min(bbox1.xmax, bbox2.xmax);
intersect_bbox->ymax = min(bbox1.ymax, bbox2.ymax);
}
template <typename T_BBOX>
__device__ float jaccardOverlapOpt(
const Bbox<T_BBOX>& bbox1,
const Bbox<T_BBOX>& bbox2,
const bool normalized)
{
Bbox<T_BBOX> intersect_bbox;
intersectBboxOpt(bbox1, bbox2, &intersect_bbox);
float intersect_size = bboxSizeOpt(intersect_bbox, normalized);
float bbox1_size = bboxSizeOpt(bbox1, normalized);
float bbox2_size = bboxSizeOpt(bbox2, normalized);
return intersect_size / (bbox1_size + bbox2_size - intersect_size);
}
template <typename T_BBOX>
__device__ void emptyBboxInfoOpt(
BboxInfo<T_BBOX>* bbox_info)
{
bbox_info->conf_score = T_BBOX(0);
    bbox_info->label = -2; // -1 is used for all labels when shared_location is true
bbox_info->bbox_idx = -1;
bbox_info->kept = false;
}
/********** new NMS for only score and index array **********/
#ifdef SSD_STABLE_TOPK
struct BlockPrefixCallbackOp
{
// Running prefix
int running_total;
// Constructor
__device__ BlockPrefixCallbackOp(int running_total) : running_total(running_total) {}
// Callback operator to be entered by the first warp of threads in the block.
// Thread-0 is responsible for returning a value for seeding the block-wide scan.
__device__ int operator()(int block_aggregate)
{
int old_prefix = running_total;
running_total += block_aggregate;
return old_prefix;
}
};
#endif
template <typename T_SCORE, typename T_BBOX, int TSIZE, bool isNormalized, int BLOCK_THREADS>
__global__ void allClassNMSOpt_kernel(
const int num_no_use,
const int num_classes,
const int num_preds_per_class,
const int top_k_,
const float nms_threshold,
const bool share_location,
const bool isNormalized_unused,
T_BBOX* bbox_data, // bbox_data should be float to preserve location information
T_SCORE* beforeNMS_scores,
int* beforeNMS_index_array,
T_SCORE* afterNMS_scores,
int* afterNMS_index_array,
    int* active_count, // number of active elements per class/batch
int* active_count_per_batch,
bool flipXY = false)
{
const int num_smem_elements = TSIZE * blockDim.x;
// number of active elements for the current batch_class combi
__shared__ int result_active_count;
#ifdef SSD_STABLE_TOPK
// Specialize BlockScan type for our thread block
typedef cub::BlockScan<int, BLOCK_THREADS> BlockScan;
__shared__ union {
typename BlockScan::TempStorage scan;
char smem_buf[BLOCK_THREADS * TSIZE * (sizeof(Bbox<T_BBOX>) + sizeof(bool))];
} temp_storage;
Bbox<T_BBOX> *sh_bbox = reinterpret_cast<Bbox<T_BBOX>*>(temp_storage.smem_buf);
#else
// keep a small smem cache for the bboxes. Alignment is guaranteed due to the order of the definitions.
extern __shared__ int4 dynamic_smem[];
Bbox<T_BBOX> *sh_bbox = reinterpret_cast<Bbox<T_BBOX>*>(dynamic_smem);
#endif
bool *kept_bboxinfo_flag = reinterpret_cast<bool*>(sh_bbox + num_smem_elements);
int active = active_count[blockIdx.y * gridDim.x + blockIdx.x];
int top_k = (active < top_k_) ? active : top_k_;
int class_id = blockIdx.x;
int batch_id = blockIdx.y;
// Each thread touches only a certain subset of all bboxinfos. Keep the kept_bboxinfo_flag for the thread in a bitmask.
uint32_t thread_kept_bboxinfo_flag = 0;
const int offset = batch_id * num_classes * num_preds_per_class + class_id * num_preds_per_class;
// local thread data
// TODO loc_bboxIndex is only required during the bbox initialization phase. don't waste registers for it...
int loc_bboxIndex[TSIZE];
Bbox<T_BBOX> loc_bbox[TSIZE];
if (active)
{
// we do not have to synchronize after writing active_count_per_batch.
// T_SIZE is > 0, so there'll be at least one syncthreads before the first usage of this variable.
if (threadIdx.x == 0) {
result_active_count = 0;
}
const int max_idx = offset + top_k; // put top_k bboxes into NMS calculation
const int bbox_idx_offset = share_location ? (batch_id * num_preds_per_class) : (batch_id * num_classes * num_preds_per_class);
// {{{ initialize Bbox, Bboxinfo, kept_bboxinfo_flag
#pragma unroll
for (int t = 0; t < TSIZE; t++)
{
bool thread_kept_bboxinfo = false;
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
if (item_idx < max_idx)
{
loc_bboxIndex[t] = beforeNMS_index_array[item_idx];
if (loc_bboxIndex[t] >= 0)
{
const int bbox_data_idx = share_location ? (loc_bboxIndex[t] % num_preds_per_class + bbox_idx_offset) : loc_bboxIndex[t];
loc_bbox[t] = ((Bbox<T_BBOX>*)bbox_data)[bbox_data_idx];
if (flipXY) {
swap(loc_bbox[t].xmin, loc_bbox[t].ymin);
swap(loc_bbox[t].xmax, loc_bbox[t].ymax);
}
sh_bbox[cur_idx] = loc_bbox[t];
thread_kept_bboxinfo = true;
thread_kept_bboxinfo_flag |= (1 << t);
}
}
kept_bboxinfo_flag[cur_idx] = thread_kept_bboxinfo;
}
// }}}
__syncthreads();
// TODO we can use loc_bboxIndex[t] == -1 to find the maximum index which is -1 and set max_idx to this value. This would reduce
        // the number of iterations for all threads if there are fewer than top-k bboxes available. How likely is this?
// {{{ filter out overlapped boxes with lower scores
{
const int offset = 0;
const int max_idx = top_k;
int ref_item_idx = 0;
while (ref_item_idx < max_idx)
{
Bbox<T_BBOX> ref_bbox;
//*((int4*)&ref_bbox) = *((int4*)&sh_bbox[ref_item_idx - offset]);
ref_bbox = sh_bbox[ref_item_idx];
//uint32_t enabled = ~1;
for (int t = 0; t < TSIZE; t++)
{
const int cur_idx = threadIdx.x + blockDim.x * t;
const int item_idx = offset + cur_idx;
if ((item_idx > ref_item_idx) && (thread_kept_bboxinfo_flag & (1 << t)))
{
if (jaccardOverlapOpt(ref_bbox, loc_bbox[t], isNormalized) > nms_threshold)
{
thread_kept_bboxinfo_flag &= ~(1 << t);
kept_bboxinfo_flag[cur_idx] = false;
}
}
}
__syncthreads();
do
{
ref_item_idx++;
} while (ref_item_idx < max_idx && !kept_bboxinfo_flag[ref_item_idx - offset]);
}
}
// }}}
#ifdef SSD_STABLE_TOPK
// need this, since we have union
__syncthreads();
#endif
// {{{ store data
// Ideally we'd compact the data for the next stage to reduce work on the next stage.
// As long as there's no TopK algorithm with a dynamic number of elements for the input
// it doesn't make sense yet to do the compact step.
// first determine the total amount of active elements after the NMS step
int thread_active = __popc(thread_kept_bboxinfo_flag);
#ifdef SSD_STABLE_TOPK
int write_offset;
int aggregate;
BlockScan(temp_storage.scan).ExclusiveSum(thread_active, write_offset, aggregate);
if (threadIdx.x == 0) {
atomicAdd(&active_count_per_batch[batch_id], aggregate);
active_count[blockIdx.y * gridDim.x + blockIdx.x] = aggregate;
}
int write_item_idx = (batch_id * num_classes * top_k_) + blockIdx.x * top_k_ + write_offset;
#endif
if (thread_active) {
#ifndef SSD_STABLE_TOPK
int write_offset = atomicAdd(&active_count_per_batch[batch_id], thread_active);
int write_item_idx = (batch_id * num_classes * top_k_) + write_offset;
#endif
for (int t = 0; t < TSIZE; t++) {
const int cur_idx = threadIdx.x + blockDim.x * t;
const int read_item_idx = offset + cur_idx;
bool is_valid_bbox = (thread_kept_bboxinfo_flag & (1 << t));
if (is_valid_bbox) {
afterNMS_scores[write_item_idx] = beforeNMS_scores[read_item_idx];
afterNMS_index_array[write_item_idx] = loc_bboxIndex[t];
++write_item_idx;
}
}
}
// }}}
}
}
template <typename T_SCORE, typename T_BBOX>
ssdStatus_t allClassNMSOpt_gpu(
cudaStream_t stream,
const int num,
const int num_classes,
const int num_preds_per_class,
const int top_k,
const float nms_threshold,
const bool share_location,
const bool isNormalized,
void* bbox_data,
void* beforeNMS_scores,
void* beforeNMS_index_array,
void* afterNMS_scores,
void* afterNMS_index_array,
void* active_count,
void* active_count_per_batch,
bool flipXY = false)
{
const int BLOCK_THREADS = 256;
#define NMS_P(tsize) allClassNMSOpt_kernel<T_SCORE, T_BBOX, (tsize), true, BLOCK_THREADS>
#define NMS_P_U(tsize) allClassNMSOpt_kernel<T_SCORE, T_BBOX, (tsize), false, BLOCK_THREADS>
void (*kernel[2][8])(const int, const int, const int, const int, const float,
const bool, const bool, float*, T_SCORE*, int*, T_SCORE*, int*,
int*, int*, bool)
= {
{NMS_P_U(1), NMS_P_U(2), NMS_P_U(3), NMS_P_U(4), NMS_P_U(5), NMS_P_U(6), NMS_P_U(7), NMS_P_U(8),},
{NMS_P(1), NMS_P(2), NMS_P(3), NMS_P(4), NMS_P(5), NMS_P(6), NMS_P(7), NMS_P(8),}
};
// round up #threads to the minimum cta size possible which holds 1 bbox per thread
// TODO 1024 is the #threads per CTA limit and should be queried from the GPU.
// With top_k > max #threads per CTA this heuristic gets inefficient and should be enhanced
// to reduce the number of idle threads.
#ifdef SSD_STABLE_TOPK
const int t_size = (top_k + BLOCK_THREADS - 1) / BLOCK_THREADS;
dim3 block(BLOCK_THREADS);
const int smem_size = 0;
#else
const int BS = std::min(((top_k + 31) / 32) * 32, 1024);
const int t_size = (top_k + BS - 1) / BS;
dim3 block(BS);
// compute smem size for bbox cache and kept boxes
const int smem_size = BS * t_size * (sizeof(bool) + sizeof(Bbox<T_BBOX>));
#endif
const dim3 GS(num_classes, num);
    assert(t_size <= 8);
kernel[isNormalized][t_size - 1]<<<GS, block, smem_size, stream>>>(num, num_classes, num_preds_per_class,
top_k, nms_threshold, share_location, isNormalized,
(T_BBOX*) bbox_data,
(T_SCORE*) beforeNMS_scores,
(int*) beforeNMS_index_array,
(T_SCORE*) afterNMS_scores,
(int*) afterNMS_index_array,
(int*) active_count,
(int*) active_count_per_batch,
flipXY);
CSC(cudaGetLastError(), STATUS_FAILURE);
return STATUS_SUCCESS;
}
// allClassNMSOpt LAUNCH CONFIG {{{
typedef ssdStatus_t (*nmsOptFunc)(cudaStream_t,
const int,
const int,
const int,
const int,
const float,
const bool,
const bool,
void*,
void*,
void*,
void*,
void*,
void*, // activeCount
void*, // activeCountPerClass
bool);
struct nmsOptLaunchConfigSSD
{
DType_t t_score;
DType_t t_bbox;
nmsOptFunc function;
nmsOptLaunchConfigSSD(DType_t t_score, DType_t t_bbox)
: t_score(t_score)
, t_bbox(t_bbox)
{
}
nmsOptLaunchConfigSSD(DType_t t_score, DType_t t_bbox, nmsOptFunc function)
: t_score(t_score)
, t_bbox(t_bbox)
, function(function)
{
}
bool operator==(const nmsOptLaunchConfigSSD& other)
{
return t_score == other.t_score && t_bbox == other.t_bbox;
}
};
static std::vector<nmsOptLaunchConfigSSD> nmsOptFuncVec;
bool nmsOptInit()
{
nmsOptFuncVec.push_back(nmsOptLaunchConfigSSD(DataType::kFLOAT, DataType::kFLOAT,
allClassNMSOpt_gpu<float, float>));
return true;
}
static bool initialized = nmsOptInit();
//}}}
ssdStatus_t allClassNMSOpt(cudaStream_t stream,
const int num,
const int num_classes,
const int num_preds_per_class,
const int top_k,
const float nms_threshold,
const bool share_location,
const bool isNormalized,
const DType_t DT_SCORE,
const DType_t DT_BBOX,
void* bbox_data,
void* beforeNMS_scores,
void* beforeNMS_index_array,
void* afterNMS_scores,
void* afterNMS_index_array,
void* active_count,
void* active_count_per_batch,
bool flipXY)
{
nmsOptLaunchConfigSSD lc = nmsOptLaunchConfigSSD(DT_SCORE, DT_BBOX, allClassNMSOpt_gpu<float, float>);
for (unsigned i = 0; i < nmsOptFuncVec.size(); ++i)
{
if (lc == nmsOptFuncVec[i])
{
DEBUG_PRINTF("all class nms kernel %d\n", i);
return nmsOptFuncVec[i].function(stream,
num,
num_classes,
num_preds_per_class,
top_k,
nms_threshold,
share_location,
isNormalized,
bbox_data,
beforeNMS_scores,
beforeNMS_index_array,
afterNMS_scores,
afterNMS_index_array,
active_count,
active_count_per_batch,
flipXY);
}
}
return STATUS_BAD_PARAM;
}
} // namespace plugin
} // namespace nvinfer1
|
1e89b2500dc8b9eaa70cc863f225c384bae02bd9.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by ops.py//
//header
#define OPS_ACC_MD_MACROS
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <hip/hip_complex.h>
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
__constant__ int nxp;
__constant__ int nyp;
__constant__ int xhalo;
__constant__ double xmin;
__constant__ double xmax;
__constant__ double dx;
__constant__ double pl;
__constant__ double pr;
__constant__ double rhol;
__constant__ double rhor;
__constant__ double ul;
__constant__ double ur;
__constant__ double gam;
__constant__ double gam1;
__constant__ double eps;
__constant__ double lambda;
__constant__ double dt;
__constant__ double del2;
__constant__ double akap2;
__constant__ double tvdsmu;
__constant__ double con;
__constant__ double Mach;
__constant__ double xt;
__constant__ int scale;
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
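  // Each OPS global constant is matched by name and copied into its device __constant__ symbol;
  // an unrecognized name is treated as a fatal error at the end of the chain below.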
if (!strcmp(name,"nxp")) {
cutilSafeCall(hipMemcpyToSymbol(nxp, dat, dim*size));
}
else
if (!strcmp(name,"nyp")) {
cutilSafeCall(hipMemcpyToSymbol(nyp, dat, dim*size));
}
else
if (!strcmp(name,"xhalo")) {
cutilSafeCall(hipMemcpyToSymbol(xhalo, dat, dim*size));
}
else
if (!strcmp(name,"xmin")) {
cutilSafeCall(hipMemcpyToSymbol(xmin, dat, dim*size));
}
else
if (!strcmp(name,"xmax")) {
cutilSafeCall(hipMemcpyToSymbol(xmax, dat, dim*size));
}
else
if (!strcmp(name,"dx")) {
cutilSafeCall(hipMemcpyToSymbol(dx, dat, dim*size));
}
else
if (!strcmp(name,"pl")) {
cutilSafeCall(hipMemcpyToSymbol(pl, dat, dim*size));
}
else
if (!strcmp(name,"pr")) {
cutilSafeCall(hipMemcpyToSymbol(pr, dat, dim*size));
}
else
if (!strcmp(name,"rhol")) {
cutilSafeCall(hipMemcpyToSymbol(rhol, dat, dim*size));
}
else
if (!strcmp(name,"rhor")) {
cutilSafeCall(hipMemcpyToSymbol(rhor, dat, dim*size));
}
else
if (!strcmp(name,"ul")) {
cutilSafeCall(hipMemcpyToSymbol(ul, dat, dim*size));
}
else
if (!strcmp(name,"ur")) {
cutilSafeCall(hipMemcpyToSymbol(ur, dat, dim*size));
}
else
if (!strcmp(name,"gam")) {
cutilSafeCall(hipMemcpyToSymbol(gam, dat, dim*size));
}
else
if (!strcmp(name,"gam1")) {
cutilSafeCall(hipMemcpyToSymbol(gam1, dat, dim*size));
}
else
if (!strcmp(name,"eps")) {
cutilSafeCall(hipMemcpyToSymbol(eps, dat, dim*size));
}
else
if (!strcmp(name,"lambda")) {
cutilSafeCall(hipMemcpyToSymbol(lambda, dat, dim*size));
}
else
if (!strcmp(name,"dt")) {
cutilSafeCall(hipMemcpyToSymbol(dt, dat, dim*size));
}
else
if (!strcmp(name,"del2")) {
cutilSafeCall(hipMemcpyToSymbol(del2, dat, dim*size));
}
else
if (!strcmp(name,"akap2")) {
cutilSafeCall(hipMemcpyToSymbol(akap2, dat, dim*size));
}
else
if (!strcmp(name,"tvdsmu")) {
cutilSafeCall(hipMemcpyToSymbol(tvdsmu, dat, dim*size));
}
else
if (!strcmp(name,"con")) {
cutilSafeCall(hipMemcpyToSymbol(con, dat, dim*size));
}
else
if (!strcmp(name,"Mach")) {
cutilSafeCall(hipMemcpyToSymbol(Mach, dat, dim*size));
}
else
if (!strcmp(name,"xt")) {
cutilSafeCall(hipMemcpyToSymbol(xt, dat, dim*size));
}
else
if (!strcmp(name,"scale")) {
cutilSafeCall(hipMemcpyToSymbol(scale, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "gridgen_kernel_cuda_kernel.cu"
#include "init_kernel_cuda_kernel.cu"
#include "save_kernel_cuda_kernel.cu"
#include "calvar_kernel_cuda_kernel.cu"
#include "xder1_kernel_cuda_kernel.cu"
#include "residue_eval_cuda_kernel.cu"
#include "updateRK3_kernel_cuda_kernel.cu"
#include "Riemann_kernel_cuda_kernel.cu"
#include "limiter_kernel_cuda_kernel.cu"
#include "tvd_kernel_cuda_kernel.cu"
#include "vars_kernel_cuda_kernel.cu"
#include "calupwindeff_kernel_cuda_kernel.cu"
#include "fact_kernel_cuda_kernel.cu"
#include "update_kernel_cuda_kernel.cu"
#include "checkop_kernel_cuda_kernel.cu"
| 1e89b2500dc8b9eaa70cc863f225c384bae02bd9.cu | //
// auto-generated by ops.py//
//header
#define OPS_ACC_MD_MACROS
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <cuComplex.h>
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
__constant__ int nxp;
__constant__ int nyp;
__constant__ int xhalo;
__constant__ double xmin;
__constant__ double xmax;
__constant__ double dx;
__constant__ double pl;
__constant__ double pr;
__constant__ double rhol;
__constant__ double rhor;
__constant__ double ul;
__constant__ double ur;
__constant__ double gam;
__constant__ double gam1;
__constant__ double eps;
__constant__ double lambda;
__constant__ double dt;
__constant__ double del2;
__constant__ double akap2;
__constant__ double tvdsmu;
__constant__ double con;
__constant__ double Mach;
__constant__ double xt;
__constant__ int scale;
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
if (!strcmp(name,"nxp")) {
cutilSafeCall(cudaMemcpyToSymbol(nxp, dat, dim*size));
}
else
if (!strcmp(name,"nyp")) {
cutilSafeCall(cudaMemcpyToSymbol(nyp, dat, dim*size));
}
else
if (!strcmp(name,"xhalo")) {
cutilSafeCall(cudaMemcpyToSymbol(xhalo, dat, dim*size));
}
else
if (!strcmp(name,"xmin")) {
cutilSafeCall(cudaMemcpyToSymbol(xmin, dat, dim*size));
}
else
if (!strcmp(name,"xmax")) {
cutilSafeCall(cudaMemcpyToSymbol(xmax, dat, dim*size));
}
else
if (!strcmp(name,"dx")) {
cutilSafeCall(cudaMemcpyToSymbol(dx, dat, dim*size));
}
else
if (!strcmp(name,"pl")) {
cutilSafeCall(cudaMemcpyToSymbol(pl, dat, dim*size));
}
else
if (!strcmp(name,"pr")) {
cutilSafeCall(cudaMemcpyToSymbol(pr, dat, dim*size));
}
else
if (!strcmp(name,"rhol")) {
cutilSafeCall(cudaMemcpyToSymbol(rhol, dat, dim*size));
}
else
if (!strcmp(name,"rhor")) {
cutilSafeCall(cudaMemcpyToSymbol(rhor, dat, dim*size));
}
else
if (!strcmp(name,"ul")) {
cutilSafeCall(cudaMemcpyToSymbol(ul, dat, dim*size));
}
else
if (!strcmp(name,"ur")) {
cutilSafeCall(cudaMemcpyToSymbol(ur, dat, dim*size));
}
else
if (!strcmp(name,"gam")) {
cutilSafeCall(cudaMemcpyToSymbol(gam, dat, dim*size));
}
else
if (!strcmp(name,"gam1")) {
cutilSafeCall(cudaMemcpyToSymbol(gam1, dat, dim*size));
}
else
if (!strcmp(name,"eps")) {
cutilSafeCall(cudaMemcpyToSymbol(eps, dat, dim*size));
}
else
if (!strcmp(name,"lambda")) {
cutilSafeCall(cudaMemcpyToSymbol(lambda, dat, dim*size));
}
else
if (!strcmp(name,"dt")) {
cutilSafeCall(cudaMemcpyToSymbol(dt, dat, dim*size));
}
else
if (!strcmp(name,"del2")) {
cutilSafeCall(cudaMemcpyToSymbol(del2, dat, dim*size));
}
else
if (!strcmp(name,"akap2")) {
cutilSafeCall(cudaMemcpyToSymbol(akap2, dat, dim*size));
}
else
if (!strcmp(name,"tvdsmu")) {
cutilSafeCall(cudaMemcpyToSymbol(tvdsmu, dat, dim*size));
}
else
if (!strcmp(name,"con")) {
cutilSafeCall(cudaMemcpyToSymbol(con, dat, dim*size));
}
else
if (!strcmp(name,"Mach")) {
cutilSafeCall(cudaMemcpyToSymbol(Mach, dat, dim*size));
}
else
if (!strcmp(name,"xt")) {
cutilSafeCall(cudaMemcpyToSymbol(xt, dat, dim*size));
}
else
if (!strcmp(name,"scale")) {
cutilSafeCall(cudaMemcpyToSymbol(scale, dat, dim*size));
}
else
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "gridgen_kernel_cuda_kernel.cu"
#include "init_kernel_cuda_kernel.cu"
#include "save_kernel_cuda_kernel.cu"
#include "calvar_kernel_cuda_kernel.cu"
#include "xder1_kernel_cuda_kernel.cu"
#include "residue_eval_cuda_kernel.cu"
#include "updateRK3_kernel_cuda_kernel.cu"
#include "Riemann_kernel_cuda_kernel.cu"
#include "limiter_kernel_cuda_kernel.cu"
#include "tvd_kernel_cuda_kernel.cu"
#include "vars_kernel_cuda_kernel.cu"
#include "calupwindeff_kernel_cuda_kernel.cu"
#include "fact_kernel_cuda_kernel.cu"
#include "update_kernel_cuda_kernel.cu"
#include "checkop_kernel_cuda_kernel.cu"
|
2759498ab8c0f05caca6158cdd54dcd025b29d9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <mpi.h>
int* d;
__constant__ int cuda_bf;
__constant__ int cuda_total_vertex;
__constant__ int cuda_tempVertex;
__constant__ int cuda_device_num;
__constant__ int cuda_FW_block;
#define INF 1e9
#define H2D hipMemcpyHostToDevice
#define D2H hipMemcpyDeviceToHost
#define D2D hipMemcpyDeviceToDevice
using namespace std;
int
init_device ()
{
hipSetDevice(0);
return 0;
}
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
//extern __shared__ int D[];
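// Blocked Floyd-Warshall, phase 1 (inferred from the launch configuration in main()):
// a single bf x bf thread block updates the pivot tile (k,k) entirely in shared memory,
// relaxing through the bf intermediate vertices of that tile.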
__global__ void floyd_warshall_1(int* dist,int k ,int kbf){
int idx,idy;
idx = k ;
idy = k ;
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
__shared__ int D[32*32];
D[threadIdx.y*cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
__syncthreads();
// Put to shared memory???
int x = 0;
//int dij = dist[i*total_vertex + j];
//int dik = dist[i*total_vertex + k];
//int dkj = dist[k*total_vertex + j];
int dij ,dik,dkj;
int a = threadIdx.y * cuda_bf + threadIdx.x;
int b = threadIdx.y * cuda_bf;
while( x < cuda_bf ){
dij = D[a];
dik = D[b + x];
dkj = D[x*cuda_bf + threadIdx.x];
if(dij>dik+dkj){
D[a] = dik + dkj;
}
__syncthreads();
x++;
}
dist[i*cuda_tempVertex + j] = D[threadIdx.y*cuda_bf + threadIdx.x];
return ;
}
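// Phase 2: one block per tile in the pivot row or pivot column (2*(FWblockDim-1) blocks).
// Even blockIdx.x handles tiles in the pivot column, odd blockIdx.x tiles in the pivot row;
// the pivot tile itself is cached in the upper half of shared memory.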
__global__ void floyd_warshall_2(int* dist,int k , int kbf ){
int idx,idy;
if(blockIdx.x % 2 == 0 ){
idx = (blockIdx.x/2) >= k ? (blockIdx.x/2+1):(blockIdx.x/2);
idy = k;
}
else {
idx = k;
idy = (blockIdx.x/2) >= k ? (blockIdx.x/2+1):(blockIdx.x/2);
}
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
//bool flag = 0;
//if(i>=cuda_total_vertex||j>=cuda_total_vertex)
// return;
__shared__ int D2[32*32*2];
D2[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
D2[(cuda_bf*cuda_bf) + (threadIdx.y *cuda_bf ) + (threadIdx.x)] = dist[ (kbf+threadIdx.y) * cuda_tempVertex + (kbf +threadIdx.x)];
__syncthreads();
// Put to shared memory???
int x = 0;
int dij ,dik,dkj;
int a = (threadIdx.y * cuda_bf + threadIdx.x);
int b;
if(blockIdx.x%2==0){
b = cuda_bf*cuda_bf + threadIdx.x;
}
else{
b = cuda_bf*cuda_bf + cuda_bf*threadIdx.y;
}
dij = D2[a];
while(x<cuda_bf){
if(blockIdx.x%2==0){
dik = D2[cuda_bf*threadIdx.y + x];
dkj = D2[b + (x*cuda_bf)];
}
else{
dik = D2[b + x];
dkj = D2[x*cuda_bf + threadIdx.x];
}
if(dij>dik+dkj){
dij = dik + dkj;
}
__syncthreads();
x++;
}
dist[i*cuda_tempVertex + j] = dij;
return ;
}
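// Phase 3: all remaining tiles. The tile row comes from blockIdx.x plus a per-rank offset (ID),
// so the row blocks are split across the GPUs/MPI ranks, while blockIdx.y picks the tile column.
// Each block caches its own tile, the pivot-column tile of its row, and the pivot-row tile of its
// column in shared memory before relaxing through the bf pivot vertices.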
__global__ void floyd_warshall_3(int* dist, int k ,int kbf,int ID){
int idx,idy;
int blockIdx_x = ((cuda_FW_block-1)/cuda_device_num)*ID + blockIdx.x;
idy = blockIdx.y >= k? blockIdx.y + 1 : blockIdx.y;
idx = blockIdx_x >= k? blockIdx_x + 1 : blockIdx_x;
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
//if(i>=cuda_total_vertex||j>=cuda_total_vertex)
// return ;
__shared__ int D3[32*32*3];
D3[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
D3[(cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(cuda_bf*idx+threadIdx.y)*cuda_tempVertex + (kbf + threadIdx.x)];
D3[(2*cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(kbf+threadIdx.y)*cuda_tempVertex + (idy*cuda_bf+threadIdx.x)];
__syncthreads();
// Put to shared memory???
int x = 0;
int dij ,dik,dkj;
int a =threadIdx.y * cuda_bf + threadIdx.x;
int b = cuda_bf*cuda_bf + threadIdx.y*cuda_bf;
int c = 2*cuda_bf*cuda_bf + threadIdx.x;
dij = D3[a];
while(x<cuda_bf){
dik = D3[b + x];
dkj = D3[x*cuda_bf + c];
if(dij>dik+dkj){
dij = dik + dkj;
}
x++;
}
dist[i*cuda_tempVertex + j] = dij;
return ;
}
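// The *_beta_* kernels below are an alternative code path (selected in main() when bf != 20 or the
// graph is denser): they process one pivot vertex at a time directly in global memory, using
// bf-wide 1-D thread blocks instead of the shared-memory tiles above.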
__global__ void floyd_warshall_beta_1(int* dist, int k , int kbf ){
int idx,idy;
idx = k;
idy = k;
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int dij = dist[i*cuda_tempVertex + j];
int dik = dist[i*cuda_tempVertex + kbf];
int dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex+j] = dik + dkj;
}
return ;
}
__global__ void floyd_warshall_beta_2(int* dist, int k , int kbf ){
int idx,idy;
int temp = blockIdx.x / cuda_bf;
if( (temp) % 2 == 0 ){
idx = (temp/2) >= k ? (temp/2+1):(temp/2);
idy = k;
}
else {
idx = k;
idy = (temp/2) >= k ? (temp/2+1):(temp/2);
}
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int dij = dist[i*cuda_tempVertex + j];
int dik = dist[i*cuda_tempVertex + kbf];
int dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex+j] = dik + dkj;
}
return ;
}
__global__ void floyd_warshall_beta_3(int* dist, int k , int kbf ,int grid_size,int ID ){
int idx,idy;
int blockIdx_y = ((cuda_FW_block-1)/cuda_device_num)*ID + blockIdx.y;
int temp = ((blockIdx_y*gridDim.x) + blockIdx.x) / cuda_bf;
idx = temp/grid_size >= k? temp/grid_size + 1 : temp/grid_size;
idy = temp % grid_size >= k? temp%grid_size + 1 : temp % grid_size;
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int x = kbf + cuda_bf;
int dij ,dik,dkj;
while(kbf<x){
dij = dist[i*cuda_tempVertex + j];
dik = dist[i*cuda_tempVertex + kbf];
dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex + j] = dik + dkj;
}
//__syncthreads();
kbf++;
}
return;
}
int main(int argc,char* argv[]){
hipEvent_t total_start, total_stop;
hipEvent_t com_start, com_stop;
hipEvent_t mem_start, mem_stop;
hipEvent_t io_start, io_stop;
MPI_Status status;
MPI_Request req;
float total_temp=0,total_total=0,io_temp =0 , io_total=0 , com_temp =0,com_total=0 , mem_temp=0 , mem_total=0;
int rc = MPI_Init(&argc,&argv);
int rank , process_num;
if(rc!= MPI_SUCCESS){
printf("Error when initializing mpi \n");
}
MPI_Comm_size(MPI_COMM_WORLD,&process_num);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if(rank==0){
hipEventCreate(&total_start);
hipEventCreate(&total_stop);
hipEventCreate(&com_start);
hipEventCreate(&com_stop);
hipEventCreate(&mem_start);
hipEventCreate(&mem_stop);
hipEventCreate(&io_start);
hipEventCreate(&io_stop);
}
hipSetDevice(rank);
cudaCheckErrors("???");
if(rank==0)
hipEventRecord(total_start);
//
//struct hipDeviceProp_t prop;
//hipGetDeviceProperties(&prop,0);
//fprintf(stderr,"clock rate %lf\n",prop.clockRate);
int bf = atoi(argv[3]);
int total_vertex;
int edge_num;
int DEVICE_NUM = 2;
int tempVertex;
int * graph;// = new int[(tempVertex)*(tempVertex)];
ifstream input;
ofstream output;
//fprintf(stderr,"IM here\n");
if(rank==0){
input.open(argv[1]);
input >> total_vertex;
input >> edge_num;
tempVertex = total_vertex % bf ? (total_vertex + (bf - (total_vertex%bf) )): total_vertex;
graph = new int[(tempVertex)*(tempVertex)];
for(int i=0;i<tempVertex;i++){
for(int j=0;j<tempVertex;j++){
graph[i*tempVertex+j] = INF;
}
graph[i*tempVertex + i ]=0;
}
hipEventRecord(io_start);
cudaCheckErrors("4");
for(int i=0;i<edge_num;i++){
int a,b;
input >> a;
input >> b;
input >> graph[(a-1)*tempVertex + (b-1) ];
//fprintf(stderr,"graph %d %d :%d\n",a,b,graph[a*tempVertex+b]);
}
MPI_Send(&total_vertex,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(&edge_num,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(&tempVertex,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(graph,tempVertex*tempVertex,MPI_INT,1,0,MPI_COMM_WORLD);
}
else{
MPI_Recv(&total_vertex,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
MPI_Recv(&edge_num,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
MPI_Recv(&tempVertex,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
graph = new int[(tempVertex)*(tempVertex)];
MPI_Recv(graph,tempVertex*tempVertex,MPI_INT,0,0,MPI_COMM_WORLD,&status);
}
//fprintf(stderr,"IM there\n");
//fprintf(stderr,"tempVertex:%d\n",tempVertex);
//d = new int[tempVertex*tempVertex];
//hipHostMalloc((void**)&graph,sizeof(int)*tempVertex*tempVertex);
/*
graph = new int[(tempVertex)*(tempVertex)];
for(int i=0;i<tempVertex;i++){
for(int j=0;j<tempVertex;j++){
graph[i*tempVertex+j] = INF;
}
graph[i*tempVertex + i ]=0;
}
if(rank==0)
hipEventRecord(io_start);
for(int i=0;i<edge_num;i++){
int a,b;
input >> a;
input >> b;
input >> graph[(a-1)*tempVertex + (b-1) ];
//fprintf(stderr,"graph %d %d :%d\n",a,b,graph[a*tempVertex+b]);
}
*/
if(rank==0){
hipEventRecord(io_stop);
cudaCheckErrors("1");
hipEventSynchronize(io_stop);
cudaCheckErrors("2");
hipEventElapsedTime(&io_temp,io_start,io_stop);
cudaCheckErrors("3");
io_total += io_temp;
}
int* cuda_graph;
//fprintf(stderr,"1111\n");
hipMalloc((void**)&cuda_graph,sizeof(int)*(tempVertex)*(tempVertex));
cudaCheckErrors("malloc gpu");
//fprintf(stderr,"2222\n");
int FWblockDim = tempVertex / bf ;
//hipSetDevice(0);
if(rank==0){
hipEventRecord(mem_start);
}
cudaCheckErrors("oao");
hipSetDevice(rank);
hipMemcpy(cuda_graph,graph,sizeof(int)*tempVertex*tempVertex ,H2D);
cudaCheckErrors("memcpy gpu");
hipMemcpyToSymbol(cuda_bf,&bf,sizeof(int));
hipMemcpyToSymbol(cuda_total_vertex,&total_vertex,sizeof(int));
hipMemcpyToSymbol(cuda_tempVertex,&tempVertex,sizeof(int));
hipMemcpyToSymbol(cuda_device_num,&DEVICE_NUM,sizeof(int));
hipMemcpyToSymbol(cuda_FW_block,&FWblockDim,sizeof(int));
if(rank==0){
hipSetDevice(rank);
hipEventRecord(mem_stop);
hipEventSynchronize(mem_stop);
hipEventElapsedTime(&mem_temp,mem_start,mem_stop);
mem_total += mem_temp;
}
//int FWblockDim = total_vertex%bf ? (total_vertex/bf + 1) : total_vertex/bf;
//int remainBF = total_vertex%bf? total_vertex%bf : bf ;
dim3 threadStr(bf,bf);
dim3 blockStr((FWblockDim-1)/DEVICE_NUM,(FWblockDim-1)/DEVICE_NUM);
dim3 blockStr_mod((FWblockDim-1)%DEVICE_NUM,(FWblockDim-1)%DEVICE_NUM);
dim3 blockStr2((FWblockDim-1)*bf,FWblockDim-1);
if(rank==0)
hipEventRecord(com_start);
double commu1,commu2,commuTotal;
commuTotal = 0;
int* copy = new int[tempVertex*tempVertex];
//fprintf(stderr,"IM HERE\n");
if( bf ==20 && edge_num/total_vertex <= 6){
//int threadId = omp_get_thread_num();
//hipSetDevice(threadId);
int* type = new int[DEVICE_NUM];
for(int K=0;K<FWblockDim;K++){
// Phase 1
int threadId = rank;
hipSetDevice(threadId);
printf("K=%d phase1 id=%d\n",K,threadId);
hipLaunchKernelGGL(( floyd_warshall_1), dim3(1),dim3(threadStr), 0, 0, cuda_graph,K,K*bf);
cudaCheckErrors("phase 1");
//hipDeviceSynchronize()
// Phase 2
printf("K=%d phase2 id=%d\n",K,threadId);
if(FWblockDim>1){
hipLaunchKernelGGL(( floyd_warshall_2), dim3(((FWblockDim-1))*2) ,dim3(threadStr), 0, 0, cuda_graph,K,K*bf);
cudaCheckErrors("phase 2 col");
// Phase 3
if(threadId!=DEVICE_NUM-1){
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
else{
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM + (FWblockDim-1)/DEVICE_NUM)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM+(FWblockDim-1)/DEVICE_NUM)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
dim3 Str_normal((FWblockDim-1)/DEVICE_NUM,FWblockDim-1);
dim3 Str_last((FWblockDim-1)/DEVICE_NUM + ((FWblockDim-1)%DEVICE_NUM), FWblockDim-1);
printf("K=%d phase3\n",K);
if(threadId==(DEVICE_NUM-1)&&(((FWblockDim-1)%DEVICE_NUM)!=0)){
hipLaunchKernelGGL(( floyd_warshall_3), dim3(Str_last),dim3(threadStr), 0, 0, cuda_graph,K,K*bf,threadId);
cudaCheckErrors("phase 3 last");
}
else if((FWblockDim-1)/DEVICE_NUM!=0){
hipLaunchKernelGGL(( floyd_warshall_3), dim3(Str_normal),dim3(threadStr), 0, 0, cuda_graph,K,K*bf,threadId);
cudaCheckErrors("phase 3 normal");
}
}
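            // After phases 2-3 each rank holds an updated slice of block rows. type[] appears to
            // encode where a rank's slice lies relative to the pivot block row K (before it,
            // straddling it, or after it) so that the offset/count of the exchanged region accounts
            // for the skipped pivot row; the slices are then swapped between ranks with
            // MPI_Isend/MPI_Recv and copied back into device memory.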
if(FWblockDim>1){
if(rank==0){
hipSetDevice(rank);
hipEventRecord(com_stop);
hipEventSynchronize(com_stop);
hipEventElapsedTime(&com_temp,com_start,com_stop);
com_total += com_temp;
hipEventRecord(mem_start);
}
int offset,count;
int i = rank;
if(type[i]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i ;
}
if(i != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[i]==1){
count += tempVertex * bf * sizeof(int);
}
hipMemcpy(graph+offset,cuda_graph+offset,count,D2H);
cudaCheckErrors("memcpy");
// fprintf(stderr,"ori count %d : %d\n",i,count);
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&type[i],1,MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Recv(&type[j],1,MPI_INT,j,i,MPI_COMM_WORLD,&status);
//fprintf(stderr,"rank %d type%d : %d\n",rank,j,type[j]);
}
if(count>0){
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&graph[offset],count/sizeof(int),MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
fprintf(stderr,"%d %d\n",i,j);
if(type[j]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j ;
}
//fprintf(stderr,"OAO\n",i,j);
if(j != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[j]==1){
count += tempVertex * bf * sizeof(int);
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
if(count>0){
                    commu1 = MPI_Wtime();
                    MPI_Recv(&graph[offset],count/sizeof(int),MPI_INT,j,i,MPI_COMM_WORLD,&status);
                    commu2 = MPI_Wtime();
                    commuTotal += commu2 - commu1;
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
hipMemcpy(cuda_graph+offset,graph+offset,count,H2D);
cudaCheckErrors("memcpy");
}
//fprintf(stderr, "QQ %d\n",rank );
if(rank==0){
hipEventRecord(mem_stop);
hipEventSynchronize(mem_stop);
hipEventElapsedTime(&mem_temp, mem_start, mem_stop);
mem_total += mem_temp;
cudaCheckErrors("mem end");
}
}
if(rank==0){
hipEventRecord(com_start);
cudaCheckErrors("com start");
}
}
}
else{
int* type = new int[DEVICE_NUM];
for(int K=0;K<FWblockDim;K++){
// Phase 1
int threadId = rank;
hipSetDevice(threadId);
//printf("K=%d phase1\n",K);
for(int i=0;i<bf;i++){
hipLaunchKernelGGL(( floyd_warshall_beta_1), dim3(bf),dim3(bf), 0, 0, cuda_graph,K,K*bf + i);
cudaCheckErrors("phase 1");
}
//printf("K=%d phase2\n",K);
//Phase 2
if(FWblockDim>1){
for(int i=0;i<bf;i++){
hipLaunchKernelGGL(( floyd_warshall_beta_2), dim3((FWblockDim-1)*2*bf),dim3(bf), 0, 0, cuda_graph,K,K*bf + i );
cudaCheckErrors("phase 2 col");
}
if(threadId!=DEVICE_NUM-1){
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
else{
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM + (FWblockDim-1)/DEVICE_NUM)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM+(FWblockDim-1)/DEVICE_NUM)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
//printf("K=%d phase3\n",K);
//Phase 3
dim3 Str_normal((FWblockDim-1)*bf,(FWblockDim-1)/DEVICE_NUM);
dim3 Str_last((FWblockDim-1)*bf, (FWblockDim-1)/DEVICE_NUM + ((FWblockDim-1)%DEVICE_NUM) );
if(threadId==(DEVICE_NUM-1)&&(((FWblockDim-1)%DEVICE_NUM)!=0)){
hipLaunchKernelGGL(( floyd_warshall_beta_3), dim3(Str_last),dim3(bf), 0, 0, cuda_graph,K,K*bf,FWblockDim-1,threadId);
}
else if((FWblockDim-1)/DEVICE_NUM!=0){
hipLaunchKernelGGL(( floyd_warshall_beta_3), dim3(Str_normal),dim3(bf), 0, 0, cuda_graph,K,K*bf,FWblockDim-1,threadId);
}
cudaCheckErrors("phase 3");
}
if(FWblockDim>1){
if(rank==0){
hipSetDevice(rank);
hipEventRecord(com_stop);
hipEventSynchronize(com_stop);
hipEventElapsedTime(&com_temp,com_start,com_stop);
com_total += com_temp;
hipEventRecord(mem_start);
}
int offset,count;
int i = rank;
if(type[i]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i ;
}
if(i != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[i]==1){
count += tempVertex * bf * sizeof(int);
}
hipMemcpy(graph+offset,cuda_graph+offset,count,D2H);
cudaCheckErrors("memcpy");
// fprintf(stderr,"ori count %d : %d\n",i,count);
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&type[i],1,MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Recv(&type[j],1,MPI_INT,j,i,MPI_COMM_WORLD,&status);
//fprintf(stderr,"rank %d type%d : %d\n",rank,j,type[j]);
}
if(count>0){
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&graph[offset],count/sizeof(int),MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
//fprintf(stderr,"%d %d\n",i,j);
if(type[j]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j ;
}
//fprintf(stderr,"OAO\n",i,j);
if(j != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[j]==1){
count += tempVertex * bf * sizeof(int);
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
if(count>0){
commu1 = MPI_Wtime();
MPI_Recv(&graph[offset],count/sizeof(int),MPI_INT,j,i,MPI_COMM_WORLD,&status);
commu2 = MPI_Wtime();
commuTotal += commu2 - commu1;
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
hipMemcpy(cuda_graph+offset,graph+offset,count,H2D);
cudaCheckErrors("memcpy");
}
//fprintf(stderr, "QQ %d\n",rank );
if(rank==0){
hipEventRecord(mem_stop);
hipEventSynchronize(mem_stop);
hipEventElapsedTime(&mem_temp, mem_start, mem_stop);
mem_total += mem_temp;
cudaCheckErrors("mem end");
}
}
if(rank==0){
hipEventRecord(com_start);
cudaCheckErrors("com start");
}
}
}
//fprintf(stderr,"IM THERE %d\n",rank);
//fprintf(stderr,"%d QAQ \n",rank);
MPI_Finalize();
hipSetDevice(rank);
hipDeviceSynchronize();
// ??
if(rank==0){
hipEventRecord(com_stop);
hipEventSynchronize(com_stop);
hipEventElapsedTime(&com_temp,com_start,com_stop);
com_total+=com_temp;
}
//fprintf(stderr,"%d QAQ 2\n",rank);
//fprintf(stderr,"qqq %s %s \n",typeid((&graph[0])+10),typeid(cuda_graph[1]+10));
if(rank==0){
hipEventRecord(mem_start);
hipMemcpy(graph,cuda_graph,sizeof(int)*tempVertex*tempVertex,D2H);
//fprintf(stderr,"%d QAQ QAQ\n",rank);
cudaCheckErrors("copy back error");
hipEventRecord(mem_stop);
hipEventSynchronize(mem_stop);
hipEventElapsedTime(&mem_temp,mem_start,mem_stop);
mem_total += mem_temp;
}
cudaCheckErrors("QQQ");
//fprintf(stderr,"%d QAQ3 \n",rank);
/*
#pragma omp parallel num_threads(DEVICE_NUM)
{
int* tempGraph = new int[tempVertex*tempVertex];
int threadId = omp_get_thread_num();
hipSetDevice(threadId);
int offset = tempVertex*(tempVertex/DEVICE_NUM) * threadId;
int count = threadId==(DEVICE_NUM-1)? tempVertex*( (tempVertex/DEVICE_NUM) + (tempVertex%DEVICE_NUM)) : tempVertex*(tempVertex/DEVICE_NUM) ;
hipMemcpy(tempGraph,(cuda_graph[threadId]),sizeof(int)*tempVertex*tempVertex,D2H);
for(int i=offset;i<offset+count;i++){
graph[i] = tempGraph[i];
}
cudaCheckErrors("copy back error");
}
*/
if(rank==0){
hipEventRecord(io_start);
output.open(argv[2]);
// SPACE!!!!!!!
for(int i=0;i<total_vertex;i++){
for(int j=0;j<total_vertex;j++){
if(graph[i*tempVertex+j]==INF){
output<<"INF";
}
else{
output<<graph[i*tempVertex+j];
}
output<<" ";
}
output<<endl;
}
hipEventRecord(io_stop);
hipEventSynchronize(io_stop);
hipEventElapsedTime(&io_temp,io_start,io_stop);
io_total += io_temp;
hipEventRecord(total_stop);
hipEventSynchronize(total_stop);
hipEventElapsedTime(&total_temp, total_start, total_stop);
}
if(rank==0){
fprintf(stderr, "\n\n");
fprintf(stderr, "TOTAL = %f\n", total_temp);
fprintf(stderr, "COMPUTE = %f\n", com_total);
fprintf(stderr, "MEMORY = %f\n", mem_total);
fprintf(stderr, "IO = %f\n", io_total);
fprintf(stderr,"MPI_COM = %lf\n",commuTotal);
}
return 0;
} | 2759498ab8c0f05caca6158cdd54dcd025b29d9a.cu | #include <cstdio>
#include <cstdlib>
#include <iostream>
#include <fstream>
#include <mpi.h>
int* d;
__constant__ int cuda_bf;
__constant__ int cuda_total_vertex;
__constant__ int cuda_tempVertex;
__constant__ int cuda_device_num;
__constant__ int cuda_FW_block;
#define INF 1e9
#define H2D cudaMemcpyHostToDevice
#define D2H cudaMemcpyDeviceToHost
#define D2D cudaMemcpyDeviceToDevice
using namespace std;
int
init_device ()
{
cudaSetDevice(0);
return 0;
}
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), __FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
//extern __shared__ int D[];
__global__ void floyd_warshall_1(int* dist,int k ,int kbf){
int idx,idy;
idx = k ;
idy = k ;
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
__shared__ int D[32*32];
D[threadIdx.y*cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
__syncthreads();
// Put to shared memory???
int x = 0;
//int dij = dist[i*total_vertex + j];
//int dik = dist[i*total_vertex + k];
//int dkj = dist[k*total_vertex + j];
int dij ,dik,dkj;
int a = threadIdx.y * cuda_bf + threadIdx.x;
int b = threadIdx.y * cuda_bf;
while( x < cuda_bf ){
dij = D[a];
dik = D[b + x];
dkj = D[x*cuda_bf + threadIdx.x];
if(dij>dik+dkj){
D[a] = dik + dkj;
}
__syncthreads();
x++;
}
dist[i*cuda_tempVertex + j] = D[threadIdx.y*cuda_bf + threadIdx.x];
return ;
}
__global__ void floyd_warshall_2(int* dist,int k , int kbf ){
int idx,idy;
if(blockIdx.x % 2 == 0 ){
idx = (blockIdx.x/2) >= k ? (blockIdx.x/2+1):(blockIdx.x/2);
idy = k;
}
else {
idx = k;
idy = (blockIdx.x/2) >= k ? (blockIdx.x/2+1):(blockIdx.x/2);
}
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
//bool flag = 0;
//if(i>=cuda_total_vertex||j>=cuda_total_vertex)
// return;
__shared__ int D2[32*32*2];
D2[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
D2[(cuda_bf*cuda_bf) + (threadIdx.y *cuda_bf ) + (threadIdx.x)] = dist[ (kbf+threadIdx.y) * cuda_tempVertex + (kbf +threadIdx.x)];
__syncthreads();
// Put to shared memory???
int x = 0;
int dij ,dik,dkj;
int a = (threadIdx.y * cuda_bf + threadIdx.x);
int b;
if(blockIdx.x%2==0){
b = cuda_bf*cuda_bf + threadIdx.x;
}
else{
b = cuda_bf*cuda_bf + cuda_bf*threadIdx.y;
}
dij = D2[a];
while(x<cuda_bf){
if(blockIdx.x%2==0){
dik = D2[cuda_bf*threadIdx.y + x];
dkj = D2[b + (x*cuda_bf)];
}
else{
dik = D2[b + x];
dkj = D2[x*cuda_bf + threadIdx.x];
}
if(dij>dik+dkj){
dij = dik + dkj;
}
__syncthreads();
x++;
}
dist[i*cuda_tempVertex + j] = dij;
return ;
}
__global__ void floyd_warshall_3(int* dist, int k ,int kbf,int ID){
int idx,idy;
int blockIdx_x = ((cuda_FW_block-1)/cuda_device_num)*ID + blockIdx.x;
idy = blockIdx.y >= k? blockIdx.y + 1 : blockIdx.y;
idx = blockIdx_x >= k? blockIdx_x + 1 : blockIdx_x;
int i = cuda_bf * idx + threadIdx.y;
int j = cuda_bf * idy + threadIdx.x;
//if(i>=cuda_total_vertex||j>=cuda_total_vertex)
// return ;
__shared__ int D3[32*32*3];
D3[threadIdx.y * cuda_bf + threadIdx.x] = dist[i*cuda_tempVertex + j];
D3[(cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(cuda_bf*idx+threadIdx.y)*cuda_tempVertex + (kbf + threadIdx.x)];
D3[(2*cuda_bf*cuda_bf) + (threadIdx.y*cuda_bf) + threadIdx.x] = dist[(kbf+threadIdx.y)*cuda_tempVertex + (idy*cuda_bf+threadIdx.x)];
__syncthreads();
// Put to shared memory???
int x = 0;
int dij ,dik,dkj;
int a =threadIdx.y * cuda_bf + threadIdx.x;
int b = cuda_bf*cuda_bf + threadIdx.y*cuda_bf;
int c = 2*cuda_bf*cuda_bf + threadIdx.x;
dij = D3[a];
while(x<cuda_bf){
dik = D3[b + x];
dkj = D3[x*cuda_bf + c];
if(dij>dik+dkj){
dij = dik + dkj;
}
x++;
}
dist[i*cuda_tempVertex + j] = dij;
return ;
}
__global__ void floyd_warshall_beta_1(int* dist, int k , int kbf ){
int idx,idy;
idx = k;
idy = k;
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int dij = dist[i*cuda_tempVertex + j];
int dik = dist[i*cuda_tempVertex + kbf];
int dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex+j] = dik + dkj;
}
return ;
}
__global__ void floyd_warshall_beta_2(int* dist, int k , int kbf ){
int idx,idy;
int temp = blockIdx.x / cuda_bf;
if( (temp) % 2 == 0 ){
idx = (temp/2) >= k ? (temp/2+1):(temp/2);
idy = k;
}
else {
idx = k;
idy = (temp/2) >= k ? (temp/2+1):(temp/2);
}
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int dij = dist[i*cuda_tempVertex + j];
int dik = dist[i*cuda_tempVertex + kbf];
int dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex+j] = dik + dkj;
}
return ;
}
__global__ void floyd_warshall_beta_3(int* dist, int k , int kbf ,int grid_size,int ID ){
int idx,idy;
int blockIdx_y = ((cuda_FW_block-1)/cuda_device_num)*ID + blockIdx.y;
int temp = ((blockIdx_y*gridDim.x) + blockIdx.x) / cuda_bf;
idx = temp/grid_size >= k? temp/grid_size + 1 : temp/grid_size;
idy = temp % grid_size >= k? temp%grid_size + 1 : temp % grid_size;
int i = cuda_bf * idx + (blockIdx.x%cuda_bf);
int j = cuda_bf * idy + threadIdx.x;
if(i>=cuda_total_vertex||j>=cuda_total_vertex)
return ;
// Put to shared memory???
int x = kbf + cuda_bf;
int dij ,dik,dkj;
while(kbf<x){
dij = dist[i*cuda_tempVertex + j];
dik = dist[i*cuda_tempVertex + kbf];
dkj = dist[kbf*cuda_tempVertex + j];
if(dij>dik+dkj){
dist[i*cuda_tempVertex + j] = dik + dkj;
}
//__syncthreads();
kbf++;
}
return;
}
int main(int argc,char* argv[]){
cudaEvent_t total_start, total_stop;
cudaEvent_t com_start, com_stop;
cudaEvent_t mem_start, mem_stop;
cudaEvent_t io_start, io_stop;
MPI_Status status;
MPI_Request req;
float total_temp=0,total_total=0,io_temp =0 , io_total=0 , com_temp =0,com_total=0 , mem_temp=0 , mem_total=0;
int rc = MPI_Init(&argc,&argv);
int rank , process_num;
if(rc!= MPI_SUCCESS){
printf("Error when initializing mpi \n");
}
MPI_Comm_size(MPI_COMM_WORLD,&process_num);
MPI_Comm_rank(MPI_COMM_WORLD,&rank);
if(rank==0){
cudaEventCreate(&total_start);
cudaEventCreate(&total_stop);
cudaEventCreate(&com_start);
cudaEventCreate(&com_stop);
cudaEventCreate(&mem_start);
cudaEventCreate(&mem_stop);
cudaEventCreate(&io_start);
cudaEventCreate(&io_stop);
}
cudaSetDevice(rank);
cudaCheckErrors("???");
if(rank==0)
cudaEventRecord(total_start);
//
//struct cudaDeviceProp prop;
//cudaGetDeviceProperties(&prop,0);
//fprintf(stderr,"clock rate %lf\n",prop.clockRate);
int bf = atoi(argv[3]);
int total_vertex;
int edge_num;
int DEVICE_NUM = 2;
int tempVertex;
int * graph;// = new int[(tempVertex)*(tempVertex)];
ifstream input;
ofstream output;
//fprintf(stderr,"IM here\n");
if(rank==0){
input.open(argv[1]);
input >> total_vertex;
input >> edge_num;
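// Pad the matrix dimension up to the next multiple of bf so every tile is a full
// bf x bf block; padded entries behave like unreachable vertices (INF off the
// diagonal, 0 on it).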
tempVertex = total_vertex % bf ? (total_vertex + (bf - (total_vertex%bf) )): total_vertex;
graph = new int[(tempVertex)*(tempVertex)];
for(int i=0;i<tempVertex;i++){
for(int j=0;j<tempVertex;j++){
graph[i*tempVertex+j] = INF;
}
graph[i*tempVertex + i ]=0;
}
cudaEventRecord(io_start);
cudaCheckErrors("4");
for(int i=0;i<edge_num;i++){
int a,b;
input >> a;
input >> b;
input >> graph[(a-1)*tempVertex + (b-1) ];
//fprintf(stderr,"graph %d %d :%d\n",a,b,graph[a*tempVertex+b]);
}
MPI_Send(&total_vertex,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(&edge_num,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(&tempVertex,1,MPI_INT,1,0,MPI_COMM_WORLD);
MPI_Send(graph,tempVertex*tempVertex,MPI_INT,1,0,MPI_COMM_WORLD);
}
else{
MPI_Recv(&total_vertex,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
MPI_Recv(&edge_num,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
MPI_Recv(&tempVertex,1,MPI_INT,0,0,MPI_COMM_WORLD,&status);
graph = new int[(tempVertex)*(tempVertex)];
MPI_Recv(graph,tempVertex*tempVertex,MPI_INT,0,0,MPI_COMM_WORLD,&status);
}
//fprintf(stderr,"IM there\n");
//fprintf(stderr,"tempVertex:%d\n",tempVertex);
//d = new int[tempVertex*tempVertex];
//cudaMallocHost((void**)&graph,sizeof(int)*tempVertex*tempVertex);
/*
graph = new int[(tempVertex)*(tempVertex)];
for(int i=0;i<tempVertex;i++){
for(int j=0;j<tempVertex;j++){
graph[i*tempVertex+j] = INF;
}
graph[i*tempVertex + i ]=0;
}
if(rank==0)
cudaEventRecord(io_start);
for(int i=0;i<edge_num;i++){
int a,b;
input >> a;
input >> b;
input >> graph[(a-1)*tempVertex + (b-1) ];
//fprintf(stderr,"graph %d %d :%d\n",a,b,graph[a*tempVertex+b]);
}
*/
if(rank==0){
cudaEventRecord(io_stop);
cudaCheckErrors("1");
cudaEventSynchronize(io_stop);
cudaCheckErrors("2");
cudaEventElapsedTime(&io_temp,io_start,io_stop);
cudaCheckErrors("3");
io_total += io_temp;
}
int* cuda_graph;
//fprintf(stderr,"1111\n");
cudaMalloc((void**)&cuda_graph,sizeof(int)*(tempVertex)*(tempVertex));
cudaCheckErrors("malloc gpu");
//fprintf(stderr,"2222\n");
int FWblockDim = tempVertex / bf ;
//cudaSetDevice(0);
if(rank==0){
cudaEventRecord(mem_start);
}
cudaCheckErrors("oao");
cudaSetDevice(rank);
cudaMemcpy(cuda_graph,graph,sizeof(int)*tempVertex*tempVertex ,H2D);
cudaCheckErrors("memcpy gpu");
cudaMemcpyToSymbol(cuda_bf,&bf,sizeof(int));
cudaMemcpyToSymbol(cuda_total_vertex,&total_vertex,sizeof(int));
cudaMemcpyToSymbol(cuda_tempVertex,&tempVertex,sizeof(int));
cudaMemcpyToSymbol(cuda_device_num,&DEVICE_NUM,sizeof(int));
cudaMemcpyToSymbol(cuda_FW_block,&FWblockDim,sizeof(int));
if(rank==0){
cudaSetDevice(rank);
cudaEventRecord(mem_stop);
cudaEventSynchronize(mem_stop);
cudaEventElapsedTime(&mem_temp,mem_start,mem_stop);
mem_total += mem_temp;
}
//int FWblockDim = total_vertex%bf ? (total_vertex/bf + 1) : total_vertex/bf;
//int remainBF = total_vertex%bf? total_vertex%bf : bf ;
dim3 threadStr(bf,bf);
dim3 blockStr((FWblockDim-1)/DEVICE_NUM,(FWblockDim-1)/DEVICE_NUM);
dim3 blockStr_mod((FWblockDim-1)%DEVICE_NUM,(FWblockDim-1)%DEVICE_NUM);
dim3 blockStr2((FWblockDim-1)*bf,FWblockDim-1);
if(rank==0)
cudaEventRecord(com_start);
double commu1,commu2,commuTotal;
commuTotal = 0;
int* copy = new int[tempVertex*tempVertex];
//fprintf(stderr,"IM HERE\n");
if( bf ==20 && edge_num/total_vertex <= 6){
//int threadId = omp_get_thread_num();
//cudaSetDevice(threadId);
int* type = new int[DEVICE_NUM];
for(int K=0;K<FWblockDim;K++){
// Phase 1
int threadId = rank;
cudaSetDevice(threadId);
printf("K=%d phase1 id=%d\n",K,threadId);
floyd_warshall_1<<<1,threadStr>>>(cuda_graph,K,K*bf);
cudaCheckErrors("phase 1");
//cudaDeviceSynchronize()
// Phase 2
printf("K=%d phase2 id=%d\n",K,threadId);
if(FWblockDim>1){
floyd_warshall_2<<< ((FWblockDim-1))*2 ,threadStr>>>(cuda_graph,K,K*bf);
cudaCheckErrors("phase 2 col");
// Phase 3
if(threadId!=DEVICE_NUM-1){
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
else{
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM + (FWblockDim-1)/DEVICE_NUM)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM+(FWblockDim-1)/DEVICE_NUM)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
dim3 Str_normal((FWblockDim-1)/DEVICE_NUM,FWblockDim-1);
dim3 Str_last((FWblockDim-1)/DEVICE_NUM + ((FWblockDim-1)%DEVICE_NUM), FWblockDim-1);
printf("K=%d phase3\n",K);
if(threadId==(DEVICE_NUM-1)&&(((FWblockDim-1)%DEVICE_NUM)!=0)){
floyd_warshall_3<<<Str_last,threadStr>>>(cuda_graph,K,K*bf,threadId);
cudaCheckErrors("phase 3 last");
}
else if((FWblockDim-1)/DEVICE_NUM!=0){
floyd_warshall_3<<<Str_normal,threadStr>>>(cuda_graph,K,K*bf,threadId);
cudaCheckErrors("phase 3 normal");
}
}
if(FWblockDim>1){
if(rank==0){
cudaSetDevice(rank);
cudaEventRecord(com_stop);
cudaEventSynchronize(com_stop);
cudaEventElapsedTime(&com_temp,com_start,com_stop);
com_total += com_temp;
cudaEventRecord(mem_start);
}
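// Inter-rank exchange of the phase-3 results: each rank copies the block rows it
// owns back to the host, sends them (together with its `type`, i.e. whether those
// rows lie before, span, or follow the pivot block row) to the other rank, receives
// the peer's rows, and uploads them to its own device copy of the matrix.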
int offset,count;
int i = rank;
if(type[i]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i ;
}
if(i != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[i]==1){
count += tempVertex * bf * sizeof(int);
}
cudaMemcpy(graph+offset,cuda_graph+offset,count,D2H);
cudaCheckErrors("memcpy");
// fprintf(stderr,"ori count %d : %d\n",i,count);
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&type[i],1,MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Recv(&type[j],1,MPI_INT,j,i,MPI_COMM_WORLD,&status);
//fprintf(stderr,"rank %d type%d : %d\n",rank,j,type[j]);
}
if(count>0){
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&graph[offset],count/sizeof(int),MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
fprintf(stderr,"%d %d\n",i,j);
if(type[j]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j ;
}
//fprintf(stderr,"OAO\n",i,j);
if(j != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[j]==1){
count += tempVertex * bf * sizeof(int);
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
if(count>0){
commu1 = MPI_Wtime();
MPI_Recv(&graph[offset],count/sizeof(int),MPI_INT,j,i,MPI_COMM_WORLD,&status);
commu2 = MPI_Wtime();
commuTotal += commu2 - commu1;
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
cudaMemcpy(cuda_graph+offset,graph+offset,count,H2D);
cudaCheckErrors("memcpy");
}
//fprintf(stderr, "QQ %d\n",rank );
if(rank==0){
cudaEventRecord(mem_stop);
cudaEventSynchronize(mem_stop);
cudaEventElapsedTime(&mem_temp, mem_start, mem_stop);
mem_total += mem_temp;
cudaCheckErrors("mem end");
}
}
if(rank==0){
cudaEventRecord(com_start);
cudaCheckErrors("com start");
}
}
}
else{
int* type = new int[DEVICE_NUM];
for(int K=0;K<FWblockDim;K++){
// Phase 1
int threadId = rank;
cudaSetDevice(threadId);
//printf("K=%d phase1\n",K);
for(int i=0;i<bf;i++){
floyd_warshall_beta_1<<<bf,bf>>>(cuda_graph,K,K*bf + i);
cudaCheckErrors("phase 1");
}
//printf("K=%d phase2\n",K);
//Phase 2
if(FWblockDim>1){
for(int i=0;i<bf;i++){
floyd_warshall_beta_2<<<(FWblockDim-1)*2*bf,bf>>>(cuda_graph,K,K*bf + i );
cudaCheckErrors("phase 2 col");
}
if(threadId!=DEVICE_NUM-1){
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId+1)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
else{
if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM + (FWblockDim-1)/DEVICE_NUM)<=K){
type[threadId] = 0;
}
else if(((FWblockDim-1)/DEVICE_NUM)*threadId<K&&((FWblockDim-1)/DEVICE_NUM)*(threadId) + ((FWblockDim-1)%DEVICE_NUM+(FWblockDim-1)/DEVICE_NUM)>K){
type[threadId] = 1;
}
else{
type[threadId] = 2;
}
}
//printf("K=%d phase3\n",K);
//Phase 3
dim3 Str_normal((FWblockDim-1)*bf,(FWblockDim-1)/DEVICE_NUM);
dim3 Str_last((FWblockDim-1)*bf, (FWblockDim-1)/DEVICE_NUM + ((FWblockDim-1)%DEVICE_NUM) );
if(threadId==(DEVICE_NUM-1)&&(((FWblockDim-1)%DEVICE_NUM)!=0)){
floyd_warshall_beta_3<<<Str_last,bf>>>(cuda_graph,K,K*bf,FWblockDim-1,threadId);
}
else if((FWblockDim-1)/DEVICE_NUM!=0){
floyd_warshall_beta_3<<<Str_normal,bf>>>(cuda_graph,K,K*bf,FWblockDim-1,threadId);
}
cudaCheckErrors("phase 3");
}
if(FWblockDim>1){
if(rank==0){
cudaSetDevice(rank);
cudaEventRecord(com_stop);
cudaEventSynchronize(com_stop);
cudaEventElapsedTime(&com_temp,com_start,com_stop);
com_total += com_temp;
cudaEventRecord(mem_start);
}
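// Same inter-rank row exchange as in the bf == 20 path above.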
int offset,count;
int i = rank;
if(type[i]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*i ;
}
if(i != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[i]==1){
count += tempVertex * bf * sizeof(int);
}
cudaMemcpy(graph+offset,cuda_graph+offset,count,D2H);
cudaCheckErrors("memcpy");
// fprintf(stderr,"ori count %d : %d\n",i,count);
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&type[i],1,MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Recv(&type[j],1,MPI_INT,j,i,MPI_COMM_WORLD,&status);
//fprintf(stderr,"rank %d type%d : %d\n",rank,j,type[j]);
}
if(count>0){
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
MPI_Isend(&graph[offset],count/sizeof(int),MPI_INT,j,j,MPI_COMM_WORLD,&req);
}
}
for(int j=0;j<DEVICE_NUM;j++){
if(i==j)
continue;
//fprintf(stderr,"%d %d\n",i,j);
if(type[j]==2){
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j + tempVertex*bf;
}
else{
offset = tempVertex*((FWblockDim-1)/DEVICE_NUM*bf )*j ;
}
//fprintf(stderr,"OAO\n",i,j);
if(j != DEVICE_NUM-1){
count = tempVertex*sizeof(int)*((FWblockDim-1)/DEVICE_NUM*bf) ;
}
else{
count = tempVertex*sizeof(int)*(((FWblockDim-1)/DEVICE_NUM*bf)+((FWblockDim-1)%DEVICE_NUM*bf));
}
if(type[j]==1){
count += tempVertex * bf * sizeof(int);
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
if(count>0){
commu1 = MPI_Wtime();
MPI_Recv(&graph[offset],count/sizeof(int),MPI_INT,j,i,MPI_COMM_WORLD,&status);
commu2 = MPI_Wtime();
commuTotal += commu2 - commu1;
}
//fprintf(stderr,"i:%d j:%d offset:%d count:%d addi%d addoff%d typei:%d typej:%d \n",i,j,offset,count,cuda_graph[i],cuda_graph[i]+offset,type[i],type[j]);
cudaMemcpy(cuda_graph+offset,graph+offset,count,H2D);
cudaCheckErrors("memcpy");
}
//fprintf(stderr, "QQ %d\n",rank );
if(rank==0){
cudaEventRecord(mem_stop);
cudaEventSynchronize(mem_stop);
cudaEventElapsedTime(&mem_temp, mem_start, mem_stop);
mem_total += mem_temp;
cudaCheckErrors("mem end");
}
}
if(rank==0){
cudaEventRecord(com_start);
cudaCheckErrors("com start");
}
}
}
//fprintf(stderr,"IM THERE %d\n",rank);
//fprintf(stderr,"%d QAQ \n",rank);
MPI_Finalize();
cudaSetDevice(rank);
cudaDeviceSynchronize();
// Should the timing calculation be moved earlier??
if(rank==0){
cudaEventRecord(com_stop);
cudaEventSynchronize(com_stop);
cudaEventElapsedTime(&com_temp,com_start,com_stop);
com_total+=com_temp;
}
//fprintf(stderr,"%d QAQ 2\n",rank);
//fprintf(stderr,"qqq %s %s \n",typeid((&graph[0])+10),typeid(cuda_graph[1]+10));
if(rank==0){
cudaEventRecord(mem_start);
cudaMemcpy(graph,cuda_graph,sizeof(int)*tempVertex*tempVertex,D2H);
//fprintf(stderr,"%d QAQ QAQ\n",rank);
cudaCheckErrors("copy back error");
cudaEventRecord(mem_stop);
cudaEventSynchronize(mem_stop);
cudaEventElapsedTime(&mem_temp,mem_start,mem_stop);
mem_total += mem_temp;
}
cudaCheckErrors("QQQ");
//fprintf(stderr,"%d QAQ3 \n",rank);
/*
#pragma omp parallel num_threads(DEVICE_NUM)
{
int* tempGraph = new int[tempVertex*tempVertex];
int threadId = omp_get_thread_num();
cudaSetDevice(threadId);
int offset = tempVertex*(tempVertex/DEVICE_NUM) * threadId;
int count = threadId==(DEVICE_NUM-1)? tempVertex*( (tempVertex/DEVICE_NUM) + (tempVertex%DEVICE_NUM)) : tempVertex*(tempVertex/DEVICE_NUM) ;
cudaMemcpy(tempGraph,(cuda_graph[threadId]),sizeof(int)*tempVertex*tempVertex,D2H);
for(int i=offset;i<offset+count;i++){
graph[i] = tempGraph[i];
}
cudaCheckErrors("copy back error");
}
*/
if(rank==0){
cudaEventRecord(io_start);
output.open(argv[2]);
// Does each output line really need a trailing SPACE or not?!
for(int i=0;i<total_vertex;i++){
for(int j=0;j<total_vertex;j++){
if(graph[i*tempVertex+j]==INF){
output<<"INF";
}
else{
output<<graph[i*tempVertex+j];
}
output<<" ";
}
output<<endl;
}
cudaEventRecord(io_stop);
cudaEventSynchronize(io_stop);
cudaEventElapsedTime(&io_temp,io_start,io_stop);
io_total += io_temp;
cudaEventRecord(total_stop);
cudaEventSynchronize(total_stop);
cudaEventElapsedTime(&total_temp, total_start, total_stop);
}
if(rank==0){
fprintf(stderr, "\n\n");
fprintf(stderr, "TOTAL = %f\n", total_temp);
fprintf(stderr, "COMPUTE = %f\n", com_total);
fprintf(stderr, "MEMORY = %f\n", mem_total);
fprintf(stderr, "IO = %f\n", io_total);
fprintf(stderr,"MPI_COM = %lf\n",commuTotal);
}
return 0;
} |
c0f5498cf761fe76b3515c4845c049d506b4e499.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE 1 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
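// rotate_kernel: each thread stages one source pixel in shared memory, converts its
// (row, col) position to Cartesian coordinates about the image centre, rotates it by
// i/j of a full turn, scales by the image diagonal so the rotated image still fits
// inside the original M x N box, and writes the pixel to its new location.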
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE][BOX_SIZE];
int row = bx * BOX_SIZE + tx; //row of image
int col = by * BOX_SIZE + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / j * i; //in degrees (floating-point division avoids truncation)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of row (vertical) pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f; // accumulated over all rotations below
hipError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255)); //start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != hipSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
imwrite(filename,result);
if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
hipError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
hipEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
hipError_t cudaStatus;
cudaStatus = hipSetDevice(0); // use the first GPU
if (cudaStatus != hipSuccess){
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
hipEventCreate(&time1);
hipEventCreate(&time2);
hipEventCreate(&time3);
hipEventCreate(&time4);
hipEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = hipMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
cudaStatus = hipMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPU_odata, zero.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = hipMemcpy(GPU_idata, image.data, TotalGPUSize, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
hipLaunchKernelGGL(( rotate_kernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = hipGetLastError();
if (cudaStatus != hipSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = hipDeviceSynchronize();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, hipGetErrorString(cudaStatus));
goto Error;
}
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy failed!\n");
goto Error;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1);
hipEventSynchronize(time2);
hipEventSynchronize(time3);
hipEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecutionTime, time2, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
hipFree(GPU_odata);
hipFree(GPU_idata);
hipFree(GPU_zerodata);
hipEventDestroy(time1);
hipEventDestroy(time2);
hipEventDestroy(time3);
hipEventDestroy(time4);
a++;
return cudaStatus;
}
| c0f5498cf761fe76b3515c4845c049d506b4e499.cu | #include <stdio.h>
#include <stdint.h>
#include <ctype.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
//CUDA STUFF:
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
//OpenCV stuff
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes);
#define BOX_SIZE 1 // ThreadsPerBlock == BOX_SIZE * BOX_SIZE
int M; //number of rows in image
int N; //number of columns in image
int NumRot;
int a = 0;
Mat zero;
//ip.Vpixels <--> M
//ip.Hpixels <--> N
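// rotate_kernel: each thread stages one source pixel in shared memory, converts its
// (row, col) position to Cartesian coordinates about the image centre, rotates it by
// i/j of a full turn, scales by the image diagonal so the rotated image still fits
// inside the original M x N box, and writes the pixel to its new location.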
__global__ void rotate_kernel(uchar *GPU_i, uchar *GPU_o, int M, int N, int i, int j){
//Block index
int bx = blockIdx.x;
int by = blockIdx.y;
//Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
__shared__ uchar shared_GPU_data[BOX_SIZE][BOX_SIZE];
int row = bx * BOX_SIZE + tx; //row of image
int col = by * BOX_SIZE + ty; //column of image
int idx = row*N + col; //which pixel in full 1D array
shared_GPU_data[tx][ty] = GPU_i[idx];
__syncthreads();
int h,v,c;
int row2; //new row of image
int col2; //new column of image
double X, Y, newY, newX, ScaleFactor;
double Diagonal, H, V;
double RotDegrees = 360.0 / j * i; //in degrees (floating-point division avoids truncation)
double RotAngle = 2*3.141592/360.000*(double) RotDegrees; //in radians
//printf("We are rotating %d times and iteration# = %d RotAngle = %g\n", j, i, RotAngle);
// transpose image coordinates to Cartesian coordinates
// integer div
c = col;
h=N/2; //halfway of column pixels
v=M/2; //halfway of row (vertical) pixels
X=(double)c-(double)h;
Y=(double)v-(double)row;
// pixel rotation matrix
newX = cos(RotAngle) * X - sin(RotAngle) * Y;
newY= sin (RotAngle) * X + cos(RotAngle) * Y;
// Scale to fit everything in the image box CONFIRMED TO BE CORRECT
H=(double)N;
V=(double)M;
Diagonal=sqrt(H*H+V*V);
ScaleFactor=(N>M) ? V/Diagonal : H/Diagonal;
newX=newX*ScaleFactor;
newY = newY*ScaleFactor;
// convert back from Cartesian to image coordinates
col2= (int)newX+h;
row2=v-(int)newY;
// maps old pixel to new pixel
int idx2 = row2*N + col2;
GPU_o[idx2] = shared_GPU_data[tx][ty];
}
int main(int argc, char *argv[]){
float GPURuntimes[4]; // run times of the GPU code
float ExecTotalTime = 0.0f, GPUTotalTime = 0.0f; // accumulated over all rotations below
cudaError_t cudaStatus;
char filename[100]; //output file name
int i;
int *CPU_OutputArray = (int*) 0; // where the GPU should copy the output back to
if (argc != 4){
printf("Improper usage!\n");
printf("Usage: %s <input image> <output image> <N rotations>\n", argv[0]);
exit(EXIT_FAILURE);
}
NumRot = atoi(argv[3]);
if (NumRot > 30){
printf("Number of rotations requested is too high! Adjusted to 30.\n");
NumRot = 30;
}
for (i = 0; i<NumRot; i++){
// Load image:
Mat image;
image = imread(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
if (! image.data){
fprintf(stderr, "Could not open or find the image.\n");
exit(EXIT_FAILURE);
}
printf("Loaded image '%s', size = %dx%d (dims = %d).\n", argv[1], image.cols, image.rows, image.dims);
//set up global variables for image size
M = image.rows;
N = image.cols;
// Create CPU memory to store the output;
/*Mat */zero = Mat(M,N,CV_8UC1, Scalar(255)); //start by making every pixel white
sprintf(filename,"%sAROT%d.png", argv[2], i);
imwrite(filename,zero);
CPU_OutputArray = (int*) malloc(M*N*sizeof(int));
if (CPU_OutputArray == NULL){
fprintf(stderr, "OOPS. Can't create CPU_OutputArray using malloc() ...\n");
exit(EXIT_FAILURE);
}
//run it
cudaStatus = launch_helper(image, CPU_OutputArray, GPURuntimes);
if (cudaStatus != cudaSuccess){
fprintf(stderr, "launch_helper failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("-----------------------------------------------------------------\n");
printf("Tfr CPU->GPU = %5.2f ms ... \nExecution = %5.2f ms ... \nTfr GPU->CPU = %5.2f ms \nSum of Iteration = %5.2f ms\n",
GPURuntimes[1], GPURuntimes[2], GPURuntimes[3], GPURuntimes[0]);
ExecTotalTime += GPURuntimes[0];
GPUTotalTime += GPURuntimes[2];
printf("\nGPU Execution Time = %5.2f ms \n", GPUTotalTime);
printf("Total Execution Time = %5.2f ms\n", ExecTotalTime);
printf("-----------------------------------------------------------------\n");
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaDeviceReset failed!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
//save image to disk
Mat result = Mat(M,N,CV_8UC1, CPU_OutputArray);
imwrite(filename,result);
if (!imwrite(filename, result)){
fprintf(stderr, "couldn't write output to disk!\n");
free(CPU_OutputArray);
exit(EXIT_FAILURE);
}
printf("Saved image '%s', size = %dx%d (dims = %d).\n",
//filename.c_str(), result.cols, result.rows, result.dims);
filename, result.cols, result.rows, result.dims);
free(CPU_OutputArray);
}
exit(EXIT_SUCCESS);
}
cudaError_t launch_helper(Mat image, int *CPU_OutputArray, float* Runtimes){
cudaEvent_t time1, time2, time3, time4;
int TotalGPUSize; // total size of 1 image in bytes
uchar *GPU_idata;
uchar *GPU_odata;
uchar *GPU_zerodata;
dim3 threadsPerBlock;
dim3 numBlocks;
cudaError_t cudaStatus;
cudaStatus = cudaSetDevice(0); // use the first GPU
if (cudaStatus != cudaSuccess){
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto Error;
}
cudaEventCreate(&time1);
cudaEventCreate(&time2);
cudaEventCreate(&time3);
cudaEventCreate(&time4);
cudaEventRecord(time1, 0);
// Allocate GPU buffer for inputs and outputs:
TotalGPUSize = M * N * sizeof(uchar);
cudaStatus = cudaMalloc((void**)&GPU_idata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_odata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
cudaStatus = cudaMalloc((void**)&GPU_zerodata, TotalGPUSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed!\n");
goto Error;
}
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPU_odata, zero.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpyzero failed!\n");
goto Error;
}
cudaStatus = cudaMemcpy(GPU_idata, image.data, TotalGPUSize, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time2, 0);
// Launch a kernel on the GPU with one thread for each pixel.
threadsPerBlock = dim3(BOX_SIZE, BOX_SIZE);
numBlocks = dim3(M / threadsPerBlock.x, N / threadsPerBlock.y);
rotate_kernel<<<numBlocks, threadsPerBlock>>>(GPU_idata, GPU_odata, M, N, a, NumRot);
// Check for errors immediately after kernel launch.
cudaStatus = cudaGetLastError();
if (cudaStatus != cudaSuccess){
fprintf(stderr, "error code %d (%s) launching kernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
cudaStatus = cudaDeviceSynchronize();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceSynchronize returned error code %d (%s) after launching addKernel!\n", cudaStatus, cudaGetErrorString(cudaStatus));
goto Error;
}
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CPU_OutputArray, GPU_odata, TotalGPUSize, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy failed!\n");
goto Error;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1);
cudaEventSynchronize(time2);
cudaEventSynchronize(time3);
cudaEventSynchronize(time4);
float totalTime, tfrCPUtoGPU, tfrGPUtoCPU, kernelExecutionTime;
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecutionTime, time2, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
Runtimes[0] = totalTime;
Runtimes[1] = tfrCPUtoGPU;
Runtimes[2] = kernelExecutionTime;
Runtimes[3] = tfrGPUtoCPU;
Error:
cudaFree(GPU_odata);
cudaFree(GPU_idata);
cudaFree(GPU_zerodata);
cudaEventDestroy(time1);
cudaEventDestroy(time2);
cudaEventDestroy(time3);
cudaEventDestroy(time4);
a++;
return cudaStatus;
}
|
0adbca43c2f905c72abff7f0862e866dbf5eed0c.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
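// Benchmark driver: for every matrix size / block shape pair above it allocates
// device buffers, warms up the `cap` kernel with 10 launches, times 1000 more
// launches with std::chrono::steady_clock, and prints the elapsed microseconds
// together with the block and matrix dimensions.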
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
cap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
cap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
cap), dim3(gridBlock),dim3(threadBlock), 0, 0, n,a,b,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 0adbca43c2f905c72abff7f0862e866dbf5eed0c.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cap.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
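// Benchmark driver: for every matrix size / block shape pair above it allocates
// device buffers, warms up the `cap` kernel with 10 launches, times 1000 more
// launches with std::chrono::steady_clock, and prints the elapsed microseconds
// together with the block and matrix dimensions.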
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
float *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cap<<<gridBlock,threadBlock>>>(n,a,b,result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cap<<<gridBlock,threadBlock>>>(n,a,b,result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cap<<<gridBlock,threadBlock>>>(n,a,b,result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
db331e276e1e4ec13d63bbfa8b9070a5b4d84af4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void reduced_sum_axis_zero(const float *input_data, float *output_data, int input_n, int output_n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < output_n) {
output_data[idx] = 0.0;
for (int i = 0; i < input_n / output_n; i++) {
output_data[idx] += input_data[i * output_n + idx];
}
}
} | db331e276e1e4ec13d63bbfa8b9070a5b4d84af4.cu | #include "includes.h"
/* TODO: Your code here */
/* all your GPU kernel code, e.g. matrix_softmax_cross_entropy_kernel */
// y = inputs[0], y_ = inputs[1]
// np.mean(-np.sum(y_ * np.log(softmax(y)), axis=1), keepdims=True)
__global__ void reduced_sum_axis_zero(const float *input_data, float *output_data, int input_n, int output_n) {
int idx = blockDim.x * blockIdx.x + threadIdx.x;
if (idx < output_n) {
output_data[idx] = 0.0;
for (int i = 0; i < input_n / output_n; i++) {
output_data[idx] += input_data[i * output_n + idx];
}
}
} |
9922f6c27764afc28d3d21940f08f81bbfde0dc5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/filler_op.h"
namespace caffe2 {
namespace {
__global__ void FillRangeKernel(const int n, float* data) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = index;
}
}
}
template <>
bool RangeFillOp<float, CUDAContext>::Fill(
TensorCUDA* output) {
int N = output->size();
hipLaunchKernelGGL(( FillRangeKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(),
N, output->mutable_data<float>());
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GivenTensorFill, GivenTensorFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
| 9922f6c27764afc28d3d21940f08f81bbfde0dc5.cu | #include "caffe2/core/context_gpu.h"
#include "caffe2/operators/filler_op.h"
namespace caffe2 {
namespace {
__global__ void FillRangeKernel(const int n, float* data) {
CUDA_1D_KERNEL_LOOP(index, n) {
data[index] = index;
}
}
}
template <>
bool RangeFillOp<float, CUDAContext>::Fill(
TensorCUDA* output) {
int N = output->size();
FillRangeKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(
N, output->mutable_data<float>());
return true;
}
namespace {
REGISTER_CUDA_OPERATOR(UniformFill, UniformFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(UniformIntFill, UniformFillOp<int, CUDAContext>);
REGISTER_CUDA_OPERATOR(ConstantFill, ConstantFillOp<CUDAContext>);
REGISTER_CUDA_OPERATOR(GivenTensorFill, GivenTensorFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(GaussianFill, GaussianFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(XavierFill, XavierFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(MSRAFill, MSRAFillOp<float, CUDAContext>);
REGISTER_CUDA_OPERATOR(RangeFill, RangeFillOp<float, CUDAContext>);
} // namespace
} // namespace caffe2
|
5688e07e019dabe8007da7eb3502e1e43cc48413.hip | // !!! This is a file automatically generated by hipify!!!
/*
Carlos Ríos Vera <[email protected]>
*/
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "adentu-atom.h"
#include "adentu-model.h"
#include "adentu-grid.h"
#include "adentu-event.h"
#include "vec3.h"
#include "adentu-cuda-utils.h"
extern "C" {
#include "adentu-event-mpc-cuda.h"
#include "vec3-cuda.h"
}
__global__ void adentu_event_mpc_cuda_integrate_kernel (vec3f *pos,
vec3f *vel,
double dT,
vec3f accel,
int nAtoms);
extern "C"
void adentu_event_mpc_cuda_integrate (AdentuAtom *fluid,
AdentuGrid *grid,
const vec3f accel,
const double dT)
{
vec3f *d_pos, *pos = fluid->pos;
vec3f *d_vel, *vel = fluid->vel;
//vec3f accel = model->accel;
//double dT = model->dT;
int nAtoms = fluid->n;
CUDA_CALL (hipMalloc ((void **)&d_pos, nAtoms * sizeof (vec3f)));
CUDA_CALL (hipMemcpy (d_pos, pos, nAtoms * sizeof (vec3f), hipMemcpyHostToDevice));
CUDA_CALL (hipMalloc ((void **)&d_vel, nAtoms * sizeof (vec3f)));
CUDA_CALL (hipMemcpy (d_vel, vel, nAtoms * sizeof (vec3f), hipMemcpyHostToDevice));
dim3 gDim;
dim3 bDim;
adentu_cuda_set_grid (&gDim, &bDim, nAtoms);
//g_message ("Integrating %d atoms.", nAtoms);
hipLaunchKernelGGL(( adentu_event_mpc_cuda_integrate_kernel), dim3(gDim), dim3(bDim), 0, 0, d_pos,
d_vel,
dT,
accel,
nAtoms);
CUDA_CALL (hipMemcpy (vel, d_vel, nAtoms * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipMemcpy (pos, d_pos, nAtoms * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipFree (d_vel));
CUDA_CALL (hipFree (d_pos));
}
__global__ void adentu_event_mpc_cuda_integrate_kernel (vec3f *pos,
vec3f *vel,
double dT,
vec3f accel,
int nAtoms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= nAtoms)
return ;
vec3f oldVel, newVel, newPos;
oldVel = newVel = vel[idx];
newPos = pos[idx];
newVel.x += (accel.x * dT);
newVel.y += (accel.y * dT);
newVel.z += (accel.z * dT);
newPos.x += (oldVel.x * dT + 0.5 * accel.x * dT * dT);
newPos.y += (oldVel.y * dT + 0.5 * accel.y * dT * dT);
newPos.z += (oldVel.z * dT + 0.5 * accel.z * dT * dT);
pos[idx] = newPos;
vel[idx] = newVel;
}
__global__ void adentu_event_mpc_cuda_vcm_kernel (vec3f *vcm,
vec3f *vel,
vec3f *velRel,
vec3f *nhat,
double alpha,
int *head,
int *linked,
int tCell,
int nAtoms);
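/* MPC collision step: a random unit axis nhat is drawn for every grid cell, and
adentu_event_mpc_cuda_vcm_kernel then computes each cell's centre-of-mass
velocity (vcm), forms the atoms' velocities relative to it, rotates those
relative velocities by the angle alpha about nhat, and adds vcm back. */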
extern "C"
void adentu_event_mpc_cuda (AdentuModel *model)
{
AdentuAtom *fluid = model->fluid;
AdentuGrid *grid = model->mpcGrid;
vec3f *vel = fluid->vel, *d_vel;
vec3f *vcm = grid->cells.vcm, *d_vcm;
vec3f *nhat = grid->cells.nhat, *d_nhat;
vec3f *velRel = fluid->velRel, *d_velRel;
int *head = grid->head, *d_head;
int *linked = grid->linked, *d_linked;
int nAtoms = fluid->n;
int tCell = grid->tCell;
double alpha = model->alpha;
CUDA_CALL (hipMalloc ((void **)&d_vel, nAtoms * sizeof (vec3f)));
CUDA_CALL (hipMemcpy (d_vel, vel, nAtoms * sizeof (vec3f), hipMemcpyHostToDevice));
CUDA_CALL (hipMalloc ((void **)&d_vcm, tCell * sizeof (vec3f)));
CUDA_CALL (hipMalloc ((void **)&d_nhat, tCell * sizeof (vec3f)));
CUDA_CALL (hipMalloc ((void **)&d_velRel, nAtoms * sizeof (vec3f)));
CUDA_CALL (hipMalloc ((void **)&d_head, tCell * sizeof (int)));
CUDA_CALL (hipMemcpy (d_head, head, tCell * sizeof (int), hipMemcpyHostToDevice));
CUDA_CALL (hipMalloc ((void **)&d_linked, nAtoms * sizeof (int)));
CUDA_CALL (hipMemcpy (d_linked, linked, nAtoms * sizeof (int), hipMemcpyHostToDevice));
/* set random axis */
vRand3f_cuda (d_nhat, tCell);
dim3 gDim;
dim3 bDim;
adentu_cuda_set_grid (&gDim, &bDim, tCell);
hipLaunchKernelGGL(( adentu_event_mpc_cuda_vcm_kernel), dim3(gDim), dim3(bDim), 0, 0, d_vcm,
d_vel,
d_velRel,
d_nhat,
alpha,
d_head,
d_linked,
tCell,
nAtoms);
CUDA_CALL (hipMemcpy (vcm, d_vcm, tCell * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipMemcpy (nhat, d_nhat, tCell * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipMemcpy (vel, d_vel, nAtoms * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipMemcpy (velRel, d_velRel, nAtoms * sizeof (vec3f), hipMemcpyDeviceToHost));
CUDA_CALL (hipFree (d_vcm));
CUDA_CALL (hipFree (d_vel));
CUDA_CALL (hipFree (d_velRel));
CUDA_CALL (hipFree (d_nhat));
CUDA_CALL (hipFree (d_head));
CUDA_CALL (hipFree (d_linked));
}
__global__ void adentu_event_mpc_cuda_vcm_kernel (vec3f *vcm,
vec3f *vel,
vec3f *velRel,
vec3f *nhat,
double alpha,
int *head,
int *linked,
int tCell,
int nAtoms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= tCell)
return ;
vec3f Vcm, Vel, VelRel, vR, vRG, vRnhat;
double velRelnhat;
vec3f Nhat = nhat[idx];
vecSet (Vcm, 0.0, 0.0, 0.0);
vecSet (Vel, 0.0, 0.0, 0.0);
vecSet (VelRel, 0.0, 0.0, 0.0);
vecSet (vR, 0.0, 0.0, 0.0);
vecSet (vRG, 0.0, 0.0, 0.0);
vecSet (vRnhat, 0.0, 0.0, 0.0);
/* Calculate vcm */
int j, i = head[idx];
j = i;
while (i != -1)
{
Vcm.x += vel[i].x;
Vcm.y += vel[i].y;
Vcm.z += vel[i].z;
i = linked[i];
}
if (j != -1)
{
Vcm.x /= nAtoms;
Vcm.y /= nAtoms;
Vcm.z /= nAtoms;
} else
Vcm.x = Vcm.y = Vcm.z = 0.0;
__syncthreads ();
/* Now calculates the Relative Velocity */
i = j;
while (i != -1)
{
VelRel.x = vel[i].x - Vcm.x;
VelRel.y = vel[i].y - Vcm.y;
VelRel.z = vel[i].z - Vcm.z;
velRel[i] = VelRel;
i = linked[i];
}
__syncthreads ();
/* Now calculate rotated relative velocities */
i = j;
while (i != -1)
{
VelRel = velRel[i];
velRelnhat = vecDot (VelRel, Nhat);
vR.x = VelRel.x - velRelnhat * Nhat.x;
vR.y = VelRel.y - velRelnhat * Nhat.y;
vR.z = VelRel.z - velRelnhat * Nhat.z;
vecCross (vRnhat, Nhat, vR);
vRG.x = cos (alpha) * vR.x + sin (alpha) * vRnhat.x;
vRG.y = cos (alpha) * vR.y + sin (alpha) * vRnhat.y;
vRG.z = cos (alpha) * vR.z + sin (alpha) * vRnhat.z;
Vel.x = vRG.x + velRelnhat * Nhat.x + Vcm.x;
Vel.y = vRG.y + velRelnhat * Nhat.y + Vcm.y;
Vel.z = vRG.z + velRelnhat * Nhat.z + Vcm.z;
vel[i] = Vel;
i = linked[i];
}
vcm[idx] = Vcm;
}
| 5688e07e019dabe8007da7eb3502e1e43cc48413.cu | /*
Carlos Ríos Vera <[email protected]>
*/
#include <cuda.h>
#include <curand.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#include "adentu-atom.h"
#include "adentu-model.h"
#include "adentu-grid.h"
#include "adentu-event.h"
#include "vec3.h"
#include "adentu-cuda-utils.h"
extern "C" {
#include "adentu-event-mpc-cuda.h"
#include "vec3-cuda.h"
}
__global__ void adentu_event_mpc_cuda_integrate_kernel (vec3f *pos,
vec3f *vel,
double dT,
vec3f accel,
int nAtoms);
extern "C"
void adentu_event_mpc_cuda_integrate (AdentuAtom *fluid,
AdentuGrid *grid,
const vec3f accel,
const double dT)
{
vec3f *d_pos, *pos = fluid->pos;
vec3f *d_vel, *vel = fluid->vel;
//vec3f accel = model->accel;
//double dT = model->dT;
int nAtoms = fluid->n;
CUDA_CALL (cudaMalloc ((void **)&d_pos, nAtoms * sizeof (vec3f)));
CUDA_CALL (cudaMemcpy (d_pos, pos, nAtoms * sizeof (vec3f), cudaMemcpyHostToDevice));
CUDA_CALL (cudaMalloc ((void **)&d_vel, nAtoms * sizeof (vec3f)));
CUDA_CALL (cudaMemcpy (d_vel, vel, nAtoms * sizeof (vec3f), cudaMemcpyHostToDevice));
dim3 gDim;
dim3 bDim;
adentu_cuda_set_grid (&gDim, &bDim, nAtoms);
//g_message ("Integrating %d atoms.", nAtoms);
adentu_event_mpc_cuda_integrate_kernel<<<gDim, bDim>>> (d_pos,
d_vel,
dT,
accel,
nAtoms);
CUDA_CALL (cudaMemcpy (vel, d_vel, nAtoms * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaMemcpy (pos, d_pos, nAtoms * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaFree (d_vel));
CUDA_CALL (cudaFree (d_pos));
}
__global__ void adentu_event_mpc_cuda_integrate_kernel (vec3f *pos,
vec3f *vel,
double dT,
vec3f accel,
int nAtoms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= nAtoms)
return ;
vec3f oldVel, newVel, newPos;
oldVel = newVel = vel[idx];
newPos = pos[idx];
newVel.x += (accel.x * dT);
newVel.y += (accel.y * dT);
newVel.z += (accel.z * dT);
newPos.x += (oldVel.x * dT + 0.5 * accel.x * dT * dT);
newPos.y += (oldVel.y * dT + 0.5 * accel.y * dT * dT);
newPos.z += (oldVel.z * dT + 0.5 * accel.z * dT * dT);
pos[idx] = newPos;
vel[idx] = newVel;
}
__global__ void adentu_event_mpc_cuda_vcm_kernel (vec3f *vcm,
vec3f *vel,
vec3f *velRel,
vec3f *nhat,
double alpha,
int *head,
int *linked,
int tCell,
int nAtoms);
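/* MPC collision step: a random unit axis nhat is drawn for every grid cell, and
adentu_event_mpc_cuda_vcm_kernel then computes each cell's centre-of-mass
velocity (vcm), forms the atoms' velocities relative to it, rotates those
relative velocities by the angle alpha about nhat, and adds vcm back. */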
extern "C"
void adentu_event_mpc_cuda (AdentuModel *model)
{
AdentuAtom *fluid = model->fluid;
AdentuGrid *grid = model->mpcGrid;
vec3f *vel = fluid->vel, *d_vel;
vec3f *vcm = grid->cells.vcm, *d_vcm;
vec3f *nhat = grid->cells.nhat, *d_nhat;
vec3f *velRel = fluid->velRel, *d_velRel;
int *head = grid->head, *d_head;
int *linked = grid->linked, *d_linked;
int nAtoms = fluid->n;
int tCell = grid->tCell;
double alpha = model->alpha;
CUDA_CALL (cudaMalloc ((void **)&d_vel, nAtoms * sizeof (vec3f)));
CUDA_CALL (cudaMemcpy (d_vel, vel, nAtoms * sizeof (vec3f), cudaMemcpyHostToDevice));
CUDA_CALL (cudaMalloc ((void **)&d_vcm, tCell * sizeof (vec3f)));
CUDA_CALL (cudaMalloc ((void **)&d_nhat, tCell * sizeof (vec3f)));
CUDA_CALL (cudaMalloc ((void **)&d_velRel, nAtoms * sizeof (vec3f)));
CUDA_CALL (cudaMalloc ((void **)&d_head, tCell * sizeof (int)));
CUDA_CALL (cudaMemcpy (d_head, head, tCell * sizeof (int), cudaMemcpyHostToDevice));
CUDA_CALL (cudaMalloc ((void **)&d_linked, nAtoms * sizeof (int)));
CUDA_CALL (cudaMemcpy (d_linked, linked, nAtoms * sizeof (int), cudaMemcpyHostToDevice));
/* set random axis */
vRand3f_cuda (d_nhat, tCell);
dim3 gDim;
dim3 bDim;
adentu_cuda_set_grid (&gDim, &bDim, tCell);
adentu_event_mpc_cuda_vcm_kernel<<<gDim, bDim>>> (d_vcm,
d_vel,
d_velRel,
d_nhat,
alpha,
d_head,
d_linked,
tCell,
nAtoms);
CUDA_CALL (cudaMemcpy (vcm, d_vcm, tCell * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaMemcpy (nhat, d_nhat, tCell * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaMemcpy (vel, d_vel, nAtoms * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaMemcpy (velRel, d_velRel, nAtoms * sizeof (vec3f), cudaMemcpyDeviceToHost));
CUDA_CALL (cudaFree (d_vcm));
CUDA_CALL (cudaFree (d_vel));
CUDA_CALL (cudaFree (d_velRel));
CUDA_CALL (cudaFree (d_nhat));
CUDA_CALL (cudaFree (d_head));
CUDA_CALL (cudaFree (d_linked));
}
__global__ void adentu_event_mpc_cuda_vcm_kernel (vec3f *vcm,
vec3f *vel,
vec3f *velRel,
vec3f *nhat,
double alpha,
int *head,
int *linked,
int tCell,
int nAtoms)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= tCell)
return ;
vec3f Vcm, Vel, VelRel, vR, vRG, vRnhat;
double velRelnhat;
vec3f Nhat = nhat[idx];
vecSet (Vcm, 0.0, 0.0, 0.0);
vecSet (Vel, 0.0, 0.0, 0.0);
vecSet (VelRel, 0.0, 0.0, 0.0);
vecSet (vR, 0.0, 0.0, 0.0);
vecSet (vRG, 0.0, 0.0, 0.0);
vecSet (vRnhat, 0.0, 0.0, 0.0);
/* Calculate vcm */
int j, i = head[idx];
j = i;
while (i != -1)
{
Vcm.x += vel[i].x;
Vcm.y += vel[i].y;
Vcm.z += vel[i].z;
i = linked[i];
}
if (j != -1)
{
Vcm.x /= nAtoms;
Vcm.y /= nAtoms;
Vcm.z /= nAtoms;
} else
Vcm.x = Vcm.y = Vcm.z = 0.0;
__syncthreads ();
/* Now calculates the Relative Velocity */
i = j;
while (i != -1)
{
VelRel.x = vel[i].x - Vcm.x;
VelRel.y = vel[i].y - Vcm.y;
VelRel.z = vel[i].z - Vcm.z;
velRel[i] = VelRel;
i = linked[i];
}
__syncthreads ();
/* Now calculate rotated relative velocities */
i = j;
while (i != -1)
{
VelRel = velRel[i];
velRelnhat = vecDot (VelRel, Nhat);
vR.x = VelRel.x - velRelnhat * Nhat.x;
vR.y = VelRel.y - velRelnhat * Nhat.y;
vR.z = VelRel.z - velRelnhat * Nhat.z;
vecCross (vRnhat, Nhat, vR);
vRG.x = cos (alpha) * vR.x + sin (alpha) * vRnhat.x;
vRG.y = cos (alpha) * vR.y + sin (alpha) * vRnhat.y;
vRG.z = cos (alpha) * vR.z + sin (alpha) * vRnhat.z;
Vel.x = vRG.x + velRelnhat * Nhat.x + Vcm.x;
Vel.y = vRG.y + velRelnhat * Nhat.y + Vcm.y;
Vel.z = vRG.z + velRelnhat * Nhat.z + Vcm.z;
vel[i] = Vel;
i = linked[i];
}
vcm[idx] = Vcm;
}
|
3e69da85b16b9ef21dcc56e5ab86beaedb5d3b43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <thrust/complex.h>
using namespace thrust;
extern "C"
{
__global__ void CUDAlogkernel(const double a, const double b, const int nu, const double *u, double *x, double *y, double *ret)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int n = sizeof(x)/sizeof(x[0]);
const double pi = M_PI;
const double lengthd = abs(b-a);
const double C = 0.5*lengthd;
complex<double> *z,*yv,*yk,*ykp1;
z = new complex<double>[n];
yv = new complex<double>[n];
yk = new complex<double>[n];
ykp1 = new complex<double>[n];
z[i] = complex<double>(x[i],y[i]);
z[i] = (a + b - 2.0*z[i])/(a - b); // tocanonical(u,z)
if (z[i].real() <= 1.0 && z[i].real() >= -1.0 && abs(z[i].imag()) <= 2.0e-14) {
yv[i] = z[i]+complex<double>(0.0,1.0)*sqrt(1.0-z[i])*sqrt(z[i]+1.0);
}
else {
yv[i] = z[i] - sqrt(z[i]-1.0)*sqrt(z[i]+1.0); // updownjoukowskyinverse(true,z)
}
yk[i] = yv[i];
ykp1[i] = yk[i]*yk[i];
if ( nu >= 0 ) {
ret[i] = -u[0]*log(abs(2.0*yk[i]/C)); // -logabs(2y/C)
if ( nu >= 1 ) {
ret[i] += -u[1]*yk[i].real(); // -real(yk)
if ( nu >= 2 ) {
ret[i] += u[2]*(log(abs(2.0*yk[i]/C))-0.5*ykp1[i].real()); // -ret[1]-.5real(ykp1)
if ( nu >= 3) {
for (int nun = 3; nun<nu; nun++) {
ykp1[i] *= yv[i];
ret[i] += u[nun]*( yk[i].real()/(nun-2.0)-ykp1[i].real()/(nun-0.0) ); // real(yk)/(n-3)-real(ykp1)/(n-1)
yk[i] *= yv[i];
}
}
}
}
}
ret[i] *= pi*C;
}
} // extern "C"
| 3e69da85b16b9ef21dcc56e5ab86beaedb5d3b43.cu | #include <thrust/complex.h>
using namespace thrust;
extern "C"
{
__global__ void CUDAlogkernel(const double a, const double b, const int nu, const double *u, double *x, double *y, double *ret)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
int n = sizeof(x)/sizeof(x[0]);
const double pi = M_PI;
const double lengthd = abs(b-a);
const double C = 0.5*lengthd;
complex<double> *z,*yv,*yk,*ykp1;
z = new complex<double>[n];
yv = new complex<double>[n];
yk = new complex<double>[n];
ykp1 = new complex<double>[n];
z[i] = complex<double>(x[i],y[i]);
z[i] = (a + b - 2.0*z[i])/(a - b); // tocanonical(u,z)
if (z[i].real() <= 1.0 && z[i].real() >= -1.0 && abs(z[i].imag()) <= 2.0e-14) {
yv[i] = z[i]+complex<double>(0.0,1.0)*sqrt(1.0-z[i])*sqrt(z[i]+1.0);
}
else {
yv[i] = z[i] - sqrt(z[i]-1.0)*sqrt(z[i]+1.0); // updownjoukowskyinverse(true,z)
}
yk[i] = yv[i];
ykp1[i] = yk[i]*yk[i];
if ( nu >= 0 ) {
ret[i] = -u[0]*log(abs(2.0*yk[i]/C)); // -logabs(2y/C)
if ( nu >= 1 ) {
ret[i] += -u[1]*yk[i].real(); // -real(yk)
if ( nu >= 2 ) {
ret[i] += u[2]*(log(abs(2.0*yk[i]/C))-0.5*ykp1[i].real()); // -ret[1]-.5real(ykp1)
if ( nu >= 3) {
for (int nun = 3; nun<nu; nun++) {
ykp1[i] *= yv[i];
ret[i] += u[nun]*( yk[i].real()/(nun-2.0)-ykp1[i].real()/(nun-0.0) ); // real(yk)/(n-3)-real(ykp1)/(n-1)
yk[i] *= yv[i];
}
}
}
}
}
ret[i] *= pi*C;
}
} // extern "C"
|
1da6a17b2528c2579b157a8fd9e1c30b136e7912.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_cuda", [&]() {
BitwiseAndFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_cuda", [&]() {
BitwiseOrFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_cuda", [&]() {
BitwiseXorFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
}} // namespace at::native
| 1da6a17b2528c2579b157a8fd9e1c30b136e7912.cu | #include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
template<typename scalar_t>
struct BitwiseAndFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a & b;
}
};
template<>
struct BitwiseAndFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a && b;
}
};
void bitwise_and_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_and_cuda", [&]() {
BitwiseAndFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
template<typename scalar_t>
struct BitwiseOrFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a | b;
}
};
template<>
struct BitwiseOrFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a || b;
}
};
void bitwise_or_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_or_cuda", [&]() {
BitwiseOrFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
template<typename scalar_t>
struct BitwiseXorFunctor {
__device__ __forceinline__ scalar_t operator()(scalar_t a, scalar_t b) const {
return a ^ b;
}
};
template<>
struct BitwiseXorFunctor<bool> {
__device__ __forceinline__ bool operator()(bool a, bool b) const {
return a != b;
}
};
void bitwise_xor_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_INTEGRAL_TYPES_AND(kBool, iter.dtype(), "bitwise_xor_cuda", [&]() {
BitwiseXorFunctor<scalar_t> f;
gpu_kernel_with_scalars(iter, f);
});
}
REGISTER_DISPATCH(bitwise_and_stub, &bitwise_and_kernel_cuda);
REGISTER_DISPATCH(bitwise_or_stub, &bitwise_or_kernel_cuda);
REGISTER_DISPATCH(bitwise_xor_stub, &bitwise_xor_kernel_cuda);
}} // namespace at::native
|
1455921cab21e03253d6159a99ce5d0061e506b4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace platform {
struct CUDAPlace;
struct float16;
} // namespace platform
} // namespace paddle
namespace paddle {
namespace operators {
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
static inline int SizeOutAxis(const int axis, DDim dims) {
int size = 1;
for (int i = axis + 1; i < dims.size(); i++) {
size *= dims[i];
}
return size;
}
template <typename T, int VLEN>
union vec_t {
static_assert(sizeof(T) == -1, "vec_t is only available by specialization.");
};
template <>
union vec_t<float, 4> {
float4 s;
float v[4];
};
template <>
union vec_t<platform::float16, 4> {
int2 s;
platform::float16 v[4];
};
template <typename T, typename VECT, int VPT, int WARP_PER_BLOCK>
__global__ void VecSoftmaxForward(T* dst, const T* src, const int batch_size,
const int softmax_ele) {
int offset = blockIdx.x * softmax_ele * WARP_PER_BLOCK;
int idx = threadIdx.x * VPT;
VECT buf = reinterpret_cast<const VECT*>(&src[offset + idx])[0];
T* bufp = reinterpret_cast<T*>(&buf);
float4 val4;
float* val4p = reinterpret_cast<float*>(&val4);
for (int i = 0; i < VPT; ++i) {
val4p[i] = static_cast<float>(bufp[i]);
}
float val = val4.x + val4.y + val4.z + val4.w;
float max_val = math::warpReduceMax<float>(
max(max(val4.x, val4.y), max(val4.z, val4.w)), 0xffffffff);
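// Max-subtraction keeps __expf in a safe range: softmax(x)_i = exp(x_i - max) / sum_j exp(x_j - max).
// The 1e-6f added to the reduced sum below guards against division by zero.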
float4 tmp4 = make_float4(__expf(val4.x - max_val), __expf(val4.y - max_val),
__expf(val4.z - max_val), __expf(val4.w - max_val));
float* tmp4p = reinterpret_cast<float*>(&tmp4);
float invsum = 1.f / (math::warpReduceSum<float>(
tmp4.x + tmp4.y + tmp4.z + tmp4.w, 0xffffffff) +
1e-6f);
for (int i = 0; i < VPT; ++i) {
bufp[i] = static_cast<T>(tmp4p[i] * invsum);
}
reinterpret_cast<VECT*>(&dst[offset + idx])[0] = buf;
}
template <typename T, int VPT, int WARP_PER_BLOCK>
__global__ void VecSoftmaxBackward(T* dst, const T* grad, const T* src,
const int batch_size,
const int softmax_ele) {
const int offset =
blockIdx.x * softmax_ele * WARP_PER_BLOCK + threadIdx.x * VPT;
float local_sum_gy = 0.f;
vec_t<T, VPT> local_grad;
vec_t<T, VPT> local_src;
local_grad.s =
reinterpret_cast<const decltype(local_grad.s)*>(&grad[offset])[0];
local_src.s = reinterpret_cast<const decltype(local_src.s)*>(&src[offset])[0];
for (int i = 0; i < VPT; ++i) {
local_sum_gy += static_cast<float>(local_grad.v[i]) *
static_cast<float>(local_src.v[i]);
}
float sum_gy = math::warpReduceSum<float>(local_sum_gy, 0xffffffff);
vec_t<T, VPT> local_dst;
for (int i = 0; i < VPT; ++i) {
local_dst.v[i] =
static_cast<T>(static_cast<float>(local_src.v[i]) *
(static_cast<float>(local_grad.v[i]) - sum_gy));
}
reinterpret_cast<decltype(local_dst.s)*>(&dst[offset])[0] = local_dst.s;
}
template <typename T>
class SoftmaxCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto dims = x->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int warps_per_block = 4;
if (D == 1 && dim == 128 && N % warps_per_block == 0 && sizeof(T) <= 4) {
// a warp for a batch, 4 elements for a thread, only support the softmax
// dim size = 128 currently
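// (128 elements / 32 lanes = 4 elements per lane, matching VPT = 4; with warps_per_block = 4
// each block handles 4 rows, which is why N must be a multiple of warps_per_block.)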
if (sizeof(T) == 2) {
hipLaunchKernelGGL(( VecSoftmaxForward<
T, int2, 4,
warps_per_block>), dim3(N / warps_per_block), dim3(warps_per_block * WARP_SIZE),
0, ctx.cuda_device_context().stream(),
out_data, x->data<T>(), N, dim);
} else if (sizeof(T) == 4) {
hipLaunchKernelGGL(( VecSoftmaxForward<
T, int4, 4,
warps_per_block>), dim3(N / warps_per_block), dim3(warps_per_block * WARP_SIZE),
0, ctx.cuda_device_context().stream(),
out_data, x->data<T>(), N, dim);
} else {
assert(false && "not support");
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data));
}
}
};
template <typename T>
class SoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out = ctx.Input<Tensor>("Out");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
auto* dx_data = dx->data<T>();
auto dims = out->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int warps_per_block = 4;
constexpr bool warp_softmax_available =
std::is_same<T, float>::value ||
std::is_same<T, platform::float16>::value;
if (D == 1 && dim == 128 && N % warps_per_block == 0 &&
warp_softmax_available) {
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( VecSoftmaxBackward<
float, 4,
warps_per_block>), dim3(N / warps_per_block), dim3(warps_per_block * WARP_SIZE),
0, ctx.cuda_device_context().stream(),
dx->data<float>(), dout->data<float>(), out->data<float>(), N, dim);
} else if (std::is_same<T, platform::float16>::value) {
hipLaunchKernelGGL(( VecSoftmaxBackward<
platform::float16, 4,
warps_per_block>), dim3(N / warps_per_block), dim3(warps_per_block * WARP_SIZE),
0, ctx.cuda_device_context().stream(),
dx->data<platform::float16>(), dout->data<platform::float16>(),
out->data<platform::float16>(), N, dim);
} else {
PADDLE_ENFORCE_EQ(
warp_softmax_available, true,
platform::errors::Unimplemented(
"Warp softmax backward is only available for fp32 and fp16"));
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(), desc_,
dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<double>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<double>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
| 1455921cab21e03253d6159a99ce5d0061e506b4.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/operators/math/math_cuda_utils.h"
#include "paddle/fluid/operators/softmax_op.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace platform {
struct CUDAPlace;
struct float16;
} // namespace platform
} // namespace paddle
namespace paddle {
namespace operators {
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using DataLayout = platform::DataLayout;
using Tensor = framework::Tensor;
static inline int SizeOutAxis(const int axis, DDim dims) {
int size = 1;
for (int i = axis + 1; i < dims.size(); i++) {
size *= dims[i];
}
return size;
}
template <typename T, int VLEN>
union vec_t {
static_assert(sizeof(T) == -1, "vec_t is only available by specialization.");
};
template <>
union vec_t<float, 4> {
float4 s;
float v[4];
};
template <>
union vec_t<platform::float16, 4> {
int2 s;
platform::float16 v[4];
};
template <typename T, typename VECT, int VPT, int WARP_PER_BLOCK>
__global__ void VecSoftmaxForward(T* dst, const T* src, const int batch_size,
const int softmax_ele) {
int offset = blockIdx.x * softmax_ele * WARP_PER_BLOCK;
int idx = threadIdx.x * VPT;
VECT buf = reinterpret_cast<const VECT*>(&src[offset + idx])[0];
T* bufp = reinterpret_cast<T*>(&buf);
float4 val4;
float* val4p = reinterpret_cast<float*>(&val4);
for (int i = 0; i < VPT; ++i) {
val4p[i] = static_cast<float>(bufp[i]);
}
float val = val4.x + val4.y + val4.z + val4.w;
float max_val = math::warpReduceMax<float>(
max(max(val4.x, val4.y), max(val4.z, val4.w)), 0xffffffff);
float4 tmp4 = make_float4(__expf(val4.x - max_val), __expf(val4.y - max_val),
__expf(val4.z - max_val), __expf(val4.w - max_val));
float* tmp4p = reinterpret_cast<float*>(&tmp4);
float invsum = 1.f / (math::warpReduceSum<float>(
tmp4.x + tmp4.y + tmp4.z + tmp4.w, 0xffffffff) +
1e-6f);
for (int i = 0; i < VPT; ++i) {
bufp[i] = static_cast<T>(tmp4p[i] * invsum);
}
reinterpret_cast<VECT*>(&dst[offset + idx])[0] = buf;
}
template <typename T, int VPT, int WARP_PER_BLOCK>
__global__ void VecSoftmaxBackward(T* dst, const T* grad, const T* src,
const int batch_size,
const int softmax_ele) {
const int offset =
blockIdx.x * softmax_ele * WARP_PER_BLOCK + threadIdx.x * VPT;
float local_sum_gy = 0.f;
vec_t<T, VPT> local_grad;
vec_t<T, VPT> local_src;
local_grad.s =
reinterpret_cast<const decltype(local_grad.s)*>(&grad[offset])[0];
local_src.s = reinterpret_cast<const decltype(local_src.s)*>(&src[offset])[0];
for (int i = 0; i < VPT; ++i) {
local_sum_gy += static_cast<float>(local_grad.v[i]) *
static_cast<float>(local_src.v[i]);
}
float sum_gy = math::warpReduceSum<float>(local_sum_gy, 0xffffffff);
vec_t<T, VPT> local_dst;
for (int i = 0; i < VPT; ++i) {
local_dst.v[i] =
static_cast<T>(static_cast<float>(local_src.v[i]) *
(static_cast<float>(local_grad.v[i]) - sum_gy));
}
reinterpret_cast<decltype(local_dst.s)*>(&dst[offset])[0] = local_dst.s;
}
template <typename T>
class SoftmaxCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* x = ctx.Input<Tensor>("X");
auto* out = ctx.Output<Tensor>("Out");
out->mutable_data<T>(ctx.GetPlace());
auto* out_data = out->data<T>();
auto dims = x->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int warps_per_block = 4;
if (D == 1 && dim == 128 && N % warps_per_block == 0 && sizeof(T) <= 4) {
// a warp for a batch, 4 elements for a thread, only support the softmax
// dim size = 128 currently
if (sizeof(T) == 2) {
VecSoftmaxForward<
T, int2, 4,
warps_per_block><<<N / warps_per_block, warps_per_block * WARP_SIZE,
0, ctx.cuda_device_context().stream()>>>(
out_data, x->data<T>(), N, dim);
} else if (sizeof(T) == 4) {
VecSoftmaxForward<
T, int4, 4,
warps_per_block><<<N / warps_per_block, warps_per_block * WARP_SIZE,
0, ctx.cuda_device_context().stream()>>>(
out_data, x->data<T>(), N, dim);
} else {
assert(false && "not support");
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxForward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, x->data<T>(),
platform::CudnnDataType<T>::kZero(), desc_, out_data));
}
}
};
template <typename T>
class SoftmaxGradCUDNNKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
auto* out = ctx.Input<Tensor>("Out");
auto* dout = ctx.Input<Tensor>(framework::GradVarName("Out"));
auto* dx = ctx.Output<Tensor>(framework::GradVarName("X"));
dx->mutable_data<T>(ctx.GetPlace());
auto* dx_data = dx->data<T>();
auto dims = out->dims();
const int rank = dims.size();
const int axis = CanonicalAxis(ctx.Attr<int>("axis"), rank);
const int dim = dims[axis];
const int N = SizeToAxis(axis, dims);
const int D = SizeOutAxis(axis, dims);
constexpr int warps_per_block = 4;
constexpr bool warp_softmax_available =
std::is_same<T, float>::value ||
std::is_same<T, platform::float16>::value;
if (D == 1 && dim == 128 && N % warps_per_block == 0 &&
warp_softmax_available) {
if (std::is_same<T, float>::value) {
VecSoftmaxBackward<
float, 4,
warps_per_block><<<N / warps_per_block, warps_per_block * WARP_SIZE,
0, ctx.cuda_device_context().stream()>>>(
dx->data<float>(), dout->data<float>(), out->data<float>(), N, dim);
} else if (std::is_same<T, platform::float16>::value) {
VecSoftmaxBackward<
platform::float16, 4,
warps_per_block><<<N / warps_per_block, warps_per_block * WARP_SIZE,
0, ctx.cuda_device_context().stream()>>>(
dx->data<platform::float16>(), dout->data<platform::float16>(),
out->data<platform::float16>(), N, dim);
} else {
PADDLE_ENFORCE_EQ(
warp_softmax_available, true,
platform::errors::Unimplemented(
"Warp softmax backward is only available for fp32 and fp16"));
}
} else {
ScopedTensorDescriptor desc;
std::vector<int> tensor_dims = {N, dim, D, 1};
DataLayout layout = DataLayout::kNCHW;
cudnnTensorDescriptor_t desc_ = desc.descriptor<T>(layout, tensor_dims);
auto& dev_ctx =
ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
auto mode = axis == rank - 1 ? CUDNN_SOFTMAX_MODE_INSTANCE
: CUDNN_SOFTMAX_MODE_CHANNEL;
PADDLE_ENFORCE_CUDA_SUCCESS(platform::dynload::cudnnSoftmaxBackward(
handle, CUDNN_SOFTMAX_ACCURATE, mode,
platform::CudnnDataType<T>::kOne(), desc_, out->data<T>(), desc_,
dout->data<T>(), platform::CudnnDataType<T>::kZero(), desc_,
dx_data));
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(softmax, CUDNN, plat::CUDAPlace,
ops::SoftmaxCUDNNKernel<float>,
ops::SoftmaxCUDNNKernel<double>,
ops::SoftmaxCUDNNKernel<plat::float16>);
REGISTER_OP_KERNEL(softmax_grad, CUDNN, plat::CUDAPlace,
ops::SoftmaxGradCUDNNKernel<float>,
ops::SoftmaxGradCUDNNKernel<double>,
ops::SoftmaxGradCUDNNKernel<plat::float16>);
|
main_gpu_1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "main.h"
#define BLOCK_SIZE 128
void arrValidation(int _real_sum, int _sum);
// bool checkBaseValue(int _N);
void initValue(int *_arr);
int cpuSumArray(int *_arr);
template <class T>
__global__ void sumArray(T *_arr, T *_oarr);
int main (int argc, char *argv[]) {
float gpu_time;
int true_sum;
int *d_arr, *d_oarr;
hipEvent_t start_t, stop_t;
int blocks = ARR_SIZE/BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid(blocks, 1, 1);
size_t sizeArr = ARR_SIZE*sizeof(int);
size_t smSize = BLOCK_SIZE*sizeof(int);
hipEventCreate(&start_t);
hipEventCreate(&stop_t);
arr = (int*)malloc(sizeArr);
hipMalloc((void **)&d_arr, sizeArr);
hipMalloc((void **)&d_oarr, sizeArr);
srand(time(NULL));
initValue(arr);
true_sum = cpuSumArray(arr);
hipMemcpy(d_arr, arr, sizeArr, hipMemcpyHostToDevice);
int workamount = (blocks > 1) ? ARR_SIZE/BLOCK_SIZE + 1 : 0;
hipEventRecord(start_t);
hipLaunchKernelGGL(( sumArray), dim3(dimGrid), dim3(dimBlock), smSize, 0, d_arr, d_oarr);
for (unsigned int dmmy=0; dmmy < workamount; dmmy++){
hipLaunchKernelGGL(( sumArray), dim3(dimGrid), dim3(dimBlock), smSize, 0, d_oarr, d_oarr);
}
hipEventRecord(stop_t);
hipEventSynchronize(stop_t);
hipEventElapsedTime(&gpu_time, start_t, stop_t);
printf("GPU time = %lf ms\n", gpu_time/ITER_TIMES);
hipMemcpy(arr, d_oarr, sizeArr, hipMemcpyDeviceToHost);
arrValidation(true_sum, arr[0]);
free(arr);
hipFree(d_arr);
return 0;
}
void initValue(int *_arr){
for(unsigned int i=0; i<ARR_SIZE; i++){
_arr[i] = (int)rand() % 9;
}
}
void arrValidation(int _real_sum, int _sum){
std::cout << _real_sum << std::endl;
std::cout << _sum << std::endl;
std::cout << _real_sum - _sum << std::endl;
if (_real_sum == _sum){
std::cout << "array summation is right !\n" << std::endl;
} else{
std::cout << "array summation is wrong !\n" << std::endl;
}
}
int cpuSumArray(int *_arr){
int sum = _arr[0];
for(unsigned int i=1; i<ARR_SIZE; i++){
sum += _arr[i];
}
return sum;
}
template <class T>
__global__ void sumArray(T *_arr, T *_oarr){
__shared__ T _sarr[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
_sarr[tid] = (i < ARR_SIZE) ? _arr[i]+_arr[i+blockDim.x] : 0;
__syncthreads();
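// The load above pre-adds two elements per thread (i and i + blockDim.x); note the guard
// only checks i, so the i + blockDim.x read is not bounds-checked.
// Below: tree reduction in shared memory; each pass halves the number of active threads
// until _sarr[0] holds this block's partial sum.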
for (unsigned int stride = blockDim.x/2; stride > 0 ; stride >>= 1){
if (tid < stride)
_sarr[tid]+=_sarr[tid+stride];
__syncthreads();
}
if (tid == 0) _oarr[blockIdx.x] = _sarr[0];
}
| main_gpu_1.cu | #include <iostream>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include "main.h"
#define BLOCK_SIZE 128
void arrValidation(int _real_sum, int _sum);
// bool checkBaseValue(int _N);
void initValue(int *_arr);
int cpuSumArray(int *_arr);
template <class T>
__global__ void sumArray(T *_arr, T *_oarr);
int main (int argc, char *argv[]) {
float gpu_time;
int true_sum;
int *d_arr, *d_oarr;
cudaEvent_t start_t, stop_t;
int blocks = ARR_SIZE/BLOCK_SIZE;
dim3 dimBlock(BLOCK_SIZE, 1, 1);
dim3 dimGrid(blocks, 1, 1);
size_t sizeArr = ARR_SIZE*sizeof(int);
size_t smSize = BLOCK_SIZE*sizeof(int);
cudaEventCreate(&start_t);
cudaEventCreate(&stop_t);
arr = (int*)malloc(sizeArr);
cudaMalloc((void **)&d_arr, sizeArr);
cudaMalloc((void **)&d_oarr, sizeArr);
srand(time(NULL));
initValue(arr);
true_sum = cpuSumArray(arr);
cudaMemcpy(d_arr, arr, sizeArr, cudaMemcpyHostToDevice);
int workamount = (blocks > 1) ? ARR_SIZE/BLOCK_SIZE + 1 : 0;
cudaEventRecord(start_t);
sumArray<<<dimGrid, dimBlock, smSize>>>(d_arr, d_oarr);
for (unsigned int dmmy=0; dmmy < workamount; dmmy++){
sumArray<<<dimGrid, dimBlock, smSize>>>(d_oarr, d_oarr);
}
cudaEventRecord(stop_t);
cudaEventSynchronize(stop_t);
cudaEventElapsedTime(&gpu_time, start_t, stop_t);
printf("GPU time = %lf ms\n", gpu_time/ITER_TIMES);
cudaMemcpy(arr, d_oarr, sizeArr, cudaMemcpyDeviceToHost);
arrValidation(true_sum, arr[0]);
free(arr);
cudaFree(d_arr);
return 0;
}
void initValue(int *_arr){
for(unsigned int i=0; i<ARR_SIZE; i++){
_arr[i] = (int)rand() % 9;
}
}
void arrValidation(int _real_sum, int _sum){
std::cout << _real_sum << std::endl;
std::cout << _sum << std::endl;
std::cout << _real_sum - _sum << std::endl;
if (_real_sum == _sum){
std::cout << "array summation is right !\n" << std::endl;
} else{
std::cout << "array summation is wrong !\n" << std::endl;
}
}
int cpuSumArray(int *_arr){
int sum = _arr[0];
for(unsigned int i=1; i<ARR_SIZE; i++){
sum += _arr[i];
}
return sum;
}
template <class T>
__global__ void sumArray(T *_arr, T *_oarr){
__shared__ T _sarr[BLOCK_SIZE];
unsigned int tid = threadIdx.x;
unsigned int i = blockIdx.x*blockDim.x+threadIdx.x;
_sarr[tid] = (i < ARR_SIZE) ? _arr[i]+_arr[i+blockDim.x] : 0;
__syncthreads();
for (unsigned int stride = blockDim.x/2; stride > 0 ; stride >>= 1){
if (tid < stride)
_sarr[tid]+=_sarr[tid+stride];
__syncthreads();
}
if (tid == 0) _oarr[blockIdx.x] = _sarr[0];
}
|
e39ccdf2f3b78d6c57dc65c2e353d516687bb681.hip | // !!! This is a file automatically generated by hipify!!!
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <hip/hip_runtime.h>
#include <cusparse_v2.h>
#include "rocblas.h"
#include <hiprand/hiprand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
/*
* log_p_y
 * log likelihood for a Poisson count observation
 * y    = observed count
 * rate = expected spike rate per unit time (rate*dt is the Poisson mean)
 * dt   = length of observation
*/
__device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt) {
return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1)
}
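// Worked example (illustrative numbers only): y = 2 spikes, rate = 20 sp/s, dt = 0.01 s gives
// 2*log(20*0.01) - 20*0.01 = 2*log(0.2) - 0.2 ~ -3.42 (the constant -log(y!) term is dropped).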
/* kcSampleSMStates
* kernel runs on each trial (not timebin)
* outputs:
* z = jump times per each trial
* s = which state jumped to
* sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial)
* inputs
* y = spike counts
* trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1]
* y is indexed at 0. This array includes final value that should be length of y)
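 *      e.g. (hypothetical values) trialIndex = {0, 5, 9} with NT = 2 means trial 0 spans y[0..4] and trial 1 spans y[5..8]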
* trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability)
* coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB
* NT = number of trials
* alpha = (3,1) array, spike rates
* phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent
* delta_t = length of each timebin
* maxJump = the longest to calculate out possible jump time values for
* randU = (NT,1) array a set of uniform random numbers on [0,1]
*
* nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences
*
* jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over
*/
__global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
int T1 = trialIndex[idx];
int T = trialIndex[idx+1]-T1;
//index in jumpToProbs for jumping to state 2
int jumpT1_2 = idx*(maxJump*2);
//index in jumpToProbs for jumping to state 3
int jumpT1_3 = idx*(maxJump*2) + maxJump;
int cohIndex = trialCoh[idx]*maxJump;
KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0;
KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]);
//calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end)
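// (up to normalization this assembles log p(z=k, s | y) =
//   sum_{t<k} log Poisson(y_t | alpha_1*dt) + sum_{t>=k} log Poisson(y_t | alpha_s*dt) + log nbPDF[k] + log p(s),
//  via the suffix sums over the post-jump likelihood here, with the pre-jump prefix, prior and p(s) terms added in the next loop)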
for(int ii = T-1; ii >= 0; ii--) {
//taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3)
jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[1],delta_t) ;
jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[2],delta_t) ;
}
KC_FP_TYPE initStateCumsum = 0;
KC_FP_TYPE maxLog = 0;
for(int ii = 0; ii < maxJump; ii++) {
// p (y_{1:t}|z==ii<=T), my comments are starting indexes at 1 while the code starts at 0
if(ii < T) {
KC_FP_TYPE p_y_init = log_p_y(y[T1+ii],alphas[0],delta_t);
initStateCumsum += p_y_init;
if(ii < T-1) {
jumpToProbs[jumpT1_2+ii+1] += initStateCumsum;
jumpToProbs[jumpT1_3+ii+1] += initStateCumsum;
}
}
else {
jumpToProbs[jumpT1_2+ii] = initStateCumsum;
jumpToProbs[jumpT1_3+ii] = initStateCumsum;
}
jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2;
jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3;
maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]);
//maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii];
}
//maxLog /= (maxJump*2.0);
KC_FP_TYPE maxNumToExp = 8;
KC_FP_TYPE minNumToExp = 2;
KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method)
if(maxLog > maxNumToExp) {
extraConst = maxLog-maxNumToExp;
}
else if(maxLog < minNumToExp) {
extraConst = minNumToExp-maxLog;
}
KC_FP_TYPE totalProbCumsum = 0;
for(int ii = 0; ii < maxJump; ii++) {
jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst);
if(phi[trialCoh[idx]] < 1.0) {
jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst);
totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii];
}
else {
totalProbCumsum += jumpToProbs[jumpT1_3+ii];
jumpToProbs[jumpT1_2+ii] = 0.0;
}
}
//goes back through and finds a sampling time + sample to state
KC_FP_TYPE post_cdf = 0;
int switchFound = -1;
int switchTime = 0;
KC_FP_TYPE randn = randU[idx] * totalProbCumsum;
for(int ii = 0; ii < maxJump && switchFound < 1; ii++) {
post_cdf += jumpToProbs[jumpT1_2+ii];
if(post_cdf > randn && phi[trialCoh[idx]] < 1) {
switchFound = 2;
switchTime = ii;
}
else {
post_cdf += jumpToProbs[jumpT1_3+ii];
if(post_cdf > randn) {
switchFound = 3;
switchTime = ii;
}
}
}
if(switchFound <= 0) {
//just to make sure it doesn't crash
switchFound = (KC_LOG(randU[idx])>p3)?2:3;
switchTime = 101;
}
s[idx] = switchFound;
z[idx] = switchTime;
//sum up observed spike count info
sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T);
sampleStats[idx*6+3] = 0;
sampleStats[idx*6+4] = 0;
sampleStats[idx*6+5] = 0;
if(switchFound == 2) {
sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ;
sampleStats[idx*6+2] = 0.0;
for(int ii = 0; ii < T;ii++) {
if(ii<switchTime) {
sampleStats[idx*6+3] += y[T1+ii];
}
else {
sampleStats[idx*6+4] += y[T1+ii];
}
}
}
else {
sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ;
sampleStats[idx*6+1] = 0.0;
for(int ii = 0; ii < T;ii++) {
if(ii<switchTime) {
sampleStats[idx*6+3] += y[T1+ii];
}
else {
sampleStats[idx*6+5] += y[T1+ii];
}
}
}
}
}
/*
* [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF);
* Inputs:
* 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array)
* 1 = trial index - 0:end-1 are the trial start times (GPU array)
* 2 = trial coherence - on GPU, coherence levels per each trial (GPU array)
* 3 = alpha, firing rates per each state (MATLAB array)
 * 4 = phi, probability of switching to state 3 for each coherence (MATLAB array)
* 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array)
* 6 = delta_t, length of each timebins
*
* Outputs (all in MATLAB array form)
* 0 = z, switching times per each trial, size (NT,1)
* 1 = s, which state was switched to per each trial (either 2 or 3), size (NT,1)
* 2 = spikeStats, summary statistics on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2)
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
//load up the GPU array inputs
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
int * trIndex = kcGetArrayDataInt(prhs[1]);
int * cohIndex = kcGetArrayDataInt(prhs[2],NT);
//put the precalculated negative binomial PDF, CDF values onto the GPU
const mwSize * precalcSize = mxGetDimensions(prhs[5]);
int maxJump = precalcSize[0];
int NC = precalcSize[1];
//mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT);
KC_FP_TYPE * nbPDF;
checkCudaErrors(hipMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump));
checkCudaErrors(hipMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,hipMemcpyHostToDevice));
KC_FP_TYPE dt = mxGetScalar(prhs[6]);
//put model parameters onto the GPU
KC_FP_TYPE * alphas;
checkCudaErrors(hipMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3));
checkCudaErrors(hipMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,hipMemcpyHostToDevice));
KC_FP_TYPE * phi;
checkCudaErrors(hipMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC));
checkCudaErrors(hipMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,hipMemcpyHostToDevice));
//setup space on GPU for sampling
// z,s,sampleStats
// log_post2 - size(TT,1)
// log_post3 - size(TT,1)
KC_FP_TYPE * log_post2;
KC_FP_TYPE * log_post3;
checkCudaErrors(hipMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT));
checkCudaErrors(hipMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT));
KC_FP_TYPE * z;
checkCudaErrors(hipMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * s;
checkCudaErrors(hipMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * sampleStats;
checkCudaErrors(hipMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT));
KC_FP_TYPE * calculationSpace;
checkCudaErrors(hipMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2));
//setup random number generator
hiprandGenerator_t curandGen = 0;
hiprandStatus_t curandStatus;
curandStatus = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != HIPRAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors sampling semi markov ");
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
if(curandStatus != HIPRAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors sampling semi markov");
}
//generate a uniform random number set (size NT*2)
KC_FP_TYPE * randU;
int randSize = NT+((NT%2==0)?0:1);
checkCudaErrors(hipMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize));
curandStatus = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize);
hipDeviceSynchronize();
//sample the states
hipLaunchKernelGGL(( kcSampleSMStates), dim3(NT),dim3(1), 0, 0, z, s, sampleStats, y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, nbPDF, calculationSpace);
hipDeviceSynchronize();
//combine the sample stats
KC_FP_TYPE * sampleStats_local;
sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,hipMemcpyDeviceToHost));
hipDeviceSynchronize();
plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL);
KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]);
for(int jj = 0; jj < 6; jj++) {
sampleStats_sum[jj] = 0;
for(int ii = 0; ii < NT; ii++) {
sampleStats_sum[jj] += sampleStats_local[ii*6 + jj];
}
}
//move sampled values to MATLAB
plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost));
//clear out random number generator
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hiprandDestroyGenerator(curandGen));
//clear GPU values
// negative binomial distribution items
checkCudaErrors(hipFree(nbPDF));
// model params
checkCudaErrors(hipFree(alphas));
checkCudaErrors(hipFree(phi));
// sampler stuff
checkCudaErrors(hipFree(log_post2));
checkCudaErrors(hipFree(log_post3));
checkCudaErrors(hipFree(z));
checkCudaErrors(hipFree(s));
checkCudaErrors(hipFree(sampleStats));
free(sampleStats_local);
checkCudaErrors(hipFree(calculationSpace));
// random nums
checkCudaErrors(hipFree(randU));
}
| e39ccdf2f3b78d6c57dc65c2e353d516687bb681.cu | #include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <cuda_runtime.h>
#include <cusparse_v2.h>
#include "cublas_v2.h"
#include <curand.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#include "mex.h"
#include "kcDefs.h" //see for info on anything starting with KC_
#include "kcArrayFunctions.h"
/*
* log_p_y
 * log likelihood for a Poisson count observation
 * y    = observed count
 * rate = expected spike rate per unit time (rate*dt is the Poisson mean)
 * dt   = length of observation
*/
__device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt) {
return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1)
}
/* kcSampleSMStates
* kernel runs on each trial (not timebin)
* outputs:
* z = jump times per each trial
* s = which state jumped to
* sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial)
* inputs
* y = spike counts
* trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1]
* y is indexed at 0. This array includes final value that should be length of y)
* trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability)
* coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB
* NT = number of trials
* alpha = (3,1) array, spike rates
* phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent
* delta_t = length of each timebin
* maxJump = the longest to calculate out possible jump time values for
* randU = (NT,1) array a set of uniform random numbers on [0,1]
*
* nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences
*
* jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over
*/
__global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs) {
int idx = blockIdx.x*blockDim.x+threadIdx.x;
if(idx < NT) {
int T1 = trialIndex[idx];
int T = trialIndex[idx+1]-T1;
//index in jumpToProbs for jumping to state 2
int jumpT1_2 = idx*(maxJump*2);
//index in jumpToProbs for jumping to state 3
int jumpT1_3 = idx*(maxJump*2) + maxJump;
int cohIndex = trialCoh[idx]*maxJump;
KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0;
KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]);
//calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end)
for(int ii = T-1; ii >= 0; ii--) {
//taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3)
jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[1],delta_t) ;
jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[2],delta_t) ;
}
KC_FP_TYPE initStateCumsum = 0;
KC_FP_TYPE maxLog = 0;
for(int ii = 0; ii < maxJump; ii++) {
// p (y_{1:t}|z==ii<=T), my comments are starting indexes at 1 while the code starts at 0
if(ii < T) {
KC_FP_TYPE p_y_init = log_p_y(y[T1+ii],alphas[0],delta_t);
initStateCumsum += p_y_init;
if(ii < T-1) {
jumpToProbs[jumpT1_2+ii+1] += initStateCumsum;
jumpToProbs[jumpT1_3+ii+1] += initStateCumsum;
}
}
else {
jumpToProbs[jumpT1_2+ii] = initStateCumsum;
jumpToProbs[jumpT1_3+ii] = initStateCumsum;
}
jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2;
jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3;
maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]);
//maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii];
}
//maxLog /= (maxJump*2.0);
KC_FP_TYPE maxNumToExp = 8;
KC_FP_TYPE minNumToExp = 2;
KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method)
if(maxLog > maxNumToExp) {
extraConst = maxLog-maxNumToExp;
}
else if(maxLog < minNumToExp) {
extraConst = minNumToExp-maxLog;
}
KC_FP_TYPE totalProbCumsum = 0;
for(int ii = 0; ii < maxJump; ii++) {
jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst);
if(phi[trialCoh[idx]] < 1.0) {
jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst);
totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii];
}
else {
totalProbCumsum += jumpToProbs[jumpT1_3+ii];
jumpToProbs[jumpT1_2+ii] = 0.0;
}
}
//goes back through and finds a sampling time + sample to state
KC_FP_TYPE post_cdf = 0;
int switchFound = -1;
int switchTime = 0;
KC_FP_TYPE randn = randU[idx] * totalProbCumsum;
for(int ii = 0; ii < maxJump && switchFound < 1; ii++) {
post_cdf += jumpToProbs[jumpT1_2+ii];
if(post_cdf > randn && phi[trialCoh[idx]] < 1) {
switchFound = 2;
switchTime = ii;
}
else {
post_cdf += jumpToProbs[jumpT1_3+ii];
if(post_cdf > randn) {
switchFound = 3;
switchTime = ii;
}
}
}
if(switchFound <= 0) {
//just to make sure it doesn't crash
switchFound = (KC_LOG(randU[idx])>p3)?2:3;
switchTime = 101;
}
s[idx] = switchFound;
z[idx] = switchTime;
//sum up observed spike count info
sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T);
sampleStats[idx*6+3] = 0;
sampleStats[idx*6+4] = 0;
sampleStats[idx*6+5] = 0;
if(switchFound == 2) {
sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ;
sampleStats[idx*6+2] = 0.0;
for(int ii = 0; ii < T;ii++) {
if(ii<switchTime) {
sampleStats[idx*6+3] += y[T1+ii];
}
else {
sampleStats[idx*6+4] += y[T1+ii];
}
}
}
else {
sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ;
sampleStats[idx*6+1] = 0.0;
for(int ii = 0; ii < T;ii++) {
if(ii<switchTime) {
sampleStats[idx*6+3] += y[T1+ii];
}
else {
sampleStats[idx*6+5] += y[T1+ii];
}
}
}
}
}
/*
* [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF);
* Inputs:
* 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array)
* 1 = trial index - 0:end-1 are the trial start times (GPU array)
* 2 = trial coherence - on GPU, coherence levels per each trial (GPU array)
* 3 = alpha, firing rates per each state (MATLAB array)
 * 4 = phi, probability of switching to state 3 for each coherence (MATLAB array)
* 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array)
* 6 = delta_t, length of each timebins
*
* Outputs (all in MATLAB array form)
* 0 = z, switching times per each trial, size (NT,1)
* 1 = s, which state was switched to per each trial (either 2 or 3), size (NT,1)
* 2 = spikeStats, summary statistics on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2)
*/
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
//load up the GPU array inputs
unsigned int TT = kcGetArrayNumEl(prhs[0]);
KC_FP_TYPE * y = kcGetArrayData(prhs[0]);
unsigned int NT = kcGetArrayNumEl(prhs[1])-1;
int * trIndex = kcGetArrayDataInt(prhs[1]);
int * cohIndex = kcGetArrayDataInt(prhs[2],NT);
//put the precalculated negative binomial PDF, CDF values onto the GPU
const mwSize * precalcSize = mxGetDimensions(prhs[5]);
int maxJump = precalcSize[0];
int NC = precalcSize[1];
//mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT);
KC_FP_TYPE * nbPDF;
checkCudaErrors(cudaMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump));
checkCudaErrors(cudaMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,cudaMemcpyHostToDevice));
KC_FP_TYPE dt = mxGetScalar(prhs[6]);
//put model parameters onto the GPU
KC_FP_TYPE * alphas;
checkCudaErrors(cudaMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3));
checkCudaErrors(cudaMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,cudaMemcpyHostToDevice));
KC_FP_TYPE * phi;
checkCudaErrors(cudaMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC));
checkCudaErrors(cudaMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,cudaMemcpyHostToDevice));
//setup space on GPU for sampling
// z,s,sampleStats
// log_post2 - size(TT,1)
// log_post3 - size(TT,1)
KC_FP_TYPE * log_post2;
KC_FP_TYPE * log_post3;
checkCudaErrors(cudaMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT));
checkCudaErrors(cudaMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT));
KC_FP_TYPE * z;
checkCudaErrors(cudaMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * s;
checkCudaErrors(cudaMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT));
KC_FP_TYPE * sampleStats;
checkCudaErrors(cudaMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT));
KC_FP_TYPE * calculationSpace;
checkCudaErrors(cudaMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2));
//setup random number generator
curandGenerator_t curandGen = 0;
curandStatus_t curandStatus;
curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT);
if(curandStatus != CURAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-1 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors sampling semi markov ");
}
struct timeval now;
gettimeofday(&now,NULL);
unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec);
curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed);
if(curandStatus != CURAND_STATUS_SUCCESS ) {
mexPrintf("CURAND-2 error %d\n",(int)curandStatus);
mexErrMsgTxt("CUDA errors sampling semi markov");
}
//generate a uniform random number set (size NT*2)
KC_FP_TYPE * randU;
int randSize = NT+((NT%2==0)?0:1);
checkCudaErrors(cudaMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize));
curandStatus = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize);
cudaDeviceSynchronize();
//sample the states
kcSampleSMStates<<<NT,1>>>(z, s, sampleStats, y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, nbPDF, calculationSpace);
cudaDeviceSynchronize();
//combine the sample stats
KC_FP_TYPE * sampleStats_local;
sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,cudaMemcpyDeviceToHost));
cudaDeviceSynchronize();
plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL);
KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]);
for(int jj = 0; jj < 6; jj++) {
sampleStats_sum[jj] = 0;
for(int ii = 0; ii < NT; ii++) {
sampleStats_sum[jj] += sampleStats_local[ii*6 + jj];
}
}
//move sampled values to MATLAB
plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL);
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost));
//clear out random number generator
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(curandDestroyGenerator(curandGen));
//clear GPU values
// negative binomial distribution items
checkCudaErrors(cudaFree(nbPDF));
// model params
checkCudaErrors(cudaFree(alphas));
checkCudaErrors(cudaFree(phi));
// sampler stuff
checkCudaErrors(cudaFree(log_post2));
checkCudaErrors(cudaFree(log_post3));
checkCudaErrors(cudaFree(z));
checkCudaErrors(cudaFree(s));
checkCudaErrors(cudaFree(sampleStats));
free(sampleStats_local);
checkCudaErrors(cudaFree(calculationSpace));
// random nums
checkCudaErrors(cudaFree(randU));
}
|
e0eff4117db03cc3c7b8b7543951bf6836511d75.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cunn_TemporalMaxPooling_updateGradInputKernel(float *gradInput, float *gradOutput, float *indices, int input_w, int input_n, int output_w, int kW, int dW) {
// Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index
float *gradInput_data = gradInput + blockIdx.x * input_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW;
float *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
float *indices_data = indices + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
int feat = 0;
if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) {
// For all features
for (feat = 0; feat < input_n; ++feat) {
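// scatter this output gradient back to the input time step that produced the max;
// indices_data[feat] is that step's offset from the start of this pooling window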
gradInput_data[(int)indices_data[feat] * input_n + feat] += gradOutput_data[feat];
}
}
} | e0eff4117db03cc3c7b8b7543951bf6836511d75.cu | #include "includes.h"
__global__ void cunn_TemporalMaxPooling_updateGradInputKernel(float *gradInput, float *gradOutput, float *indices, int input_w, int input_n, int output_w, int kW, int dW) {
// Block idx is the batch index, thread idx + block idx y * MAX_THREADS is the time index
float *gradInput_data = gradInput + blockIdx.x * input_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n * dW;
float *gradOutput_data = gradOutput + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
float *indices_data = indices + blockIdx.x * output_w * input_n + (
threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS) * input_n;
int feat = 0;
if (threadIdx.x + blockIdx.y * TEMPORAL_MAX_POOLING_THREADS < output_w) {
// For all features
for (feat = 0; feat < input_n; ++feat) {
gradInput_data[(int)indices_data[feat] * input_n + feat] += gradOutput_data[feat];
}
}
} |
76e48614c94673af9762eb0df7df0af514a865b3.hip | // !!! This is a file automatically generated by hipify!!!
// RUN: %clang_cc1 -fsyntax-only -verify %s
void hipConfigureCall(unsigned gridSize, unsigned blockSize); // expected-error {{must have scalar return type}}
| 76e48614c94673af9762eb0df7df0af514a865b3.cu | // RUN: %clang_cc1 -fsyntax-only -verify %s
void cudaConfigureCall(unsigned gridSize, unsigned blockSize); // expected-error {{must have scalar return type}}
|
c1c0dd4a733a853da6af21465ff5aa50784b03cf.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <hip/hip_runtime_api.h>
#include <typeinfo>
#include <hipcub/hipcub.hpp>
#include <hipcub/hipcub.hpp>
#include <cub/util_ptx.cuh>
#include <hipcub/hipcub.hpp>
#include <cub/util_debug.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
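// A minimal usage sketch of the BlockReduce interface exercised by these tests
// (illustrative only; the 128-thread block size and the d_in/d_out names are assumptions):
//
//   __global__ void ExampleBlockSum(const int *d_in, int *d_out)
//   {
//       typedef BlockReduce<int, 128> BlockReduceT;
//       __shared__ typename BlockReduceT::TempStorage temp_storage;
//       int thread_data = d_in[blockIdx.x * 128 + threadIdx.x];
//       int block_sum = BlockReduceT(temp_storage).Sum(thread_data);  // aggregate valid in thread 0 only
//       if (threadIdx.x == 0) d_out[blockIdx.x] = block_sum;
//   }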
/// Generic reduction (full, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data[0], reduction_op);
}
/// Generic reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data, reduction_op);
}
/// Generic reduction (partial, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, ReductionOp &reduction_op, int valid_threads)
{
return block_reduce.Reduce(data, reduction_op, valid_threads);
}
/// Sum reduction (full, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], Sum &reduction_op)
{
return block_reduce.Sum(data[0]);
}
/// Sum reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], Sum &reduction_op)
{
return block_reduce.Sum(data);
}
/// Sum reduction (partial, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, Sum &reduction_op, int valid_threads)
{
return block_reduce.Sum(data, valid_threads);
}
/**
* Test full-tile reduction kernel (where num_items is an even
* multiple of BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void FullTileReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
int tiles,
clock_t *d_elapsed)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Cooperative thread block reduction utility type (returns aggregate in thread 0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T data[ITEMS_PER_THREAD];
// Load first tile of data
int block_offset = 0;
if (block_offset < TILE_SIZE * tiles)
{
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperative reduce first tile
BlockReduceT block_reduce(temp_storage) ;
T block_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Loop over input tiles
while (block_offset < TILE_SIZE * tiles)
{
// TestBarrier between thread block reductions
__syncthreads();
// Load tile of data
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
elapsed += (start > stop) ? start - stop : stop - start;
// Reduce thread block aggregate
block_aggregate = reduction_op(block_aggregate, tile_aggregate);
}
// Store data
if (linear_tid == 0)
{
d_out[0] = block_aggregate;
*d_elapsed = elapsed;
}
}
}
/**
* Test partial-tile reduction kernel (where num_items < BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void PartialTileReduceKernel(
T *d_in,
T *d_out,
int num_items,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative thread block reduction utility type (returns aggregate only in thread-0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T partial;
// Load partial tile data
if (linear_tid < num_items)
{
partial = d_in[linear_tid];
}
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, partial, reduction_op, num_items);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Store data
if (linear_tid == 0)
{
d_out[0] = tile_aggregate;
*d_elapsed = elapsed;
}
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
T *h_in,
T h_reference[1],
ReductionOp reduction_op,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
if (i == 0)
h_reference[0] = h_in[0];
else
h_reference[0] = reduction_op(h_reference[0], h_in[i]);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n");
}
}
//---------------------------------------------------------------------
// Full tile test generation
//---------------------------------------------------------------------
/**
* Test full-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<true> /*sufficient_resources*/)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
int num_items = TILE_SIZE * tiles;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * 1));
// Test multi-tile (unguarded)
printf("TestFullTile %s, %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), ITEMS_PER_THREAD(%d), tiles(%d), %s (%d bytes) elements:\n",
Equals<ReductionOp, Sum>::VALUE ? "Sum" : "Max",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
ITEMS_PER_THREAD,
tiles,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
hipLaunchKernelGGL(( FullTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD>), dim3(1), dim3(block_dims), 0, 0,
d_in,
d_out,
reduction_op,
tiles,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test full-tile reduction. (Specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Test full-tile reduction.
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512),
#else
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024),
#endif
};
TestFullTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
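// Note: the Int2Type<sufficient_smem && sufficient_threads> tag selects at compile time between
// the real TestFullTile specialization above and the empty one, skipping configurations whose
// shared-memory or thread-count requirements exceed the target architecture's limits.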
/**
* Run battery of tests for different thread block dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, 1, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 2, 2, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of tests for different thread items
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 4, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of full-tile tests for different numbers of tiles
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (int tiles = 1; tiles < 3; tiles++)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(gen_mode, tiles, reduction_op);
}
}
//---------------------------------------------------------------------
// Partial-tile test generation
//---------------------------------------------------------------------
/**
* Test partial-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<true> /*sufficient_resources*/)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(hipMemcpy(d_in, h_in, sizeof(T) * num_items, hipMemcpyHostToDevice));
CubDebugExit(hipMemset(d_out, 0, sizeof(T) * 1));
printf("TestPartialTile %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), %s (%d bytes) elements:\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
hipLaunchKernelGGL(( PartialTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z>), dim3(1), dim3(block_dims), 0, 0,
d_in,
d_out,
num_items,
reduction_op,
d_elapsed);
CubDebugExit(hipPeekAtLastError());
CubDebugExit(hipDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test partial-tile reduction (specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512,
#else
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024,
#endif
};
TestPartialTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, T>(gen_mode, num_items, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (
int num_items = 1;
num_items < BLOCK_THREADS;
num_items += CUB_MAX(1, BLOCK_THREADS / 5))
{
TestPartialTile<ALGORITHM, BLOCK_THREADS, 1, 1, T>(gen_mode, num_items, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, 2, 2, T>(gen_mode, num_items, reduction_op);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Run battery of full-tile tests for different gen modes
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
if (Traits<T>::CATEGORY != FLOATING_POINT)
{
// Don't test randomly-generated floats b/c of stability
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
}
}
/**
* Run battery of tests for different block-reduction algorithmic variants
*/
template <
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
(void)reduction_op;
#ifdef TEST_RAKING
Test<BLOCK_REDUCE_RAKING, BLOCK_THREADS, T>(reduction_op);
Test<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, BLOCK_THREADS, T>(reduction_op);
#endif
#ifdef TEST_WARP_REDUCTIONS
Test<BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_THREADS, T>(reduction_op);
#endif
}
/**
* Run battery of tests for different block sizes
*/
template <
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
Test<7, T>(reduction_op);
Test<32, T>(reduction_op);
Test<63, T>(reduction_op);
Test<97, T>(reduction_op);
Test<128, T>(reduction_op);
Test<238, T>(reduction_op);
}
/**
* Run battery of tests for different block sizes
*/
template <typename T>
void Test()
{
Test<T>(Sum());
Test<T>(Max());
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests
printf("\n full tile ------------------------\n\n");
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
printf("\n partial tile ------------------------\n\n");
TestPartialTile<BLOCK_REDUCE_RAKING, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, int>(RANDOM, 7, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// primitives
Test<char>();
Test<short>();
Test<int>();
Test<long long>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double>();
Test<float>();
// vector types
Test<char2>();
Test<short2>();
Test<int2>();
Test<longlong2>();
Test<char4>();
Test<short4>();
Test<int4>();
Test<longlong4>();
// Complex types
Test<TestFoo>();
Test<TestBar>();
}
#endif
return 0;
}
| c1c0dd4a733a853da6af21465ff5aa50784b03cf.cu | /******************************************************************************
* Copyright (c) 2011, Duane Merrill. All rights reserved.
* Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
******************************************************************************/
/******************************************************************************
* Test of BlockReduce utilities
******************************************************************************/
// Ensure printing of CUDA runtime errors to console
#define CUB_STDERR
#include <stdio.h>
#include <cuda_runtime_api.h>
#include <typeinfo>
#include <cub/block/block_reduce.cuh>
#include <cub/block/block_load.cuh>
#include <cub/util_ptx.cuh>
#include <cub/util_allocator.cuh>
#include <cub/util_debug.cuh>
#include "test_util.h"
using namespace cub;
//---------------------------------------------------------------------
// Globals, constants and typedefs
//---------------------------------------------------------------------
bool g_verbose = false;
int g_repeat = 0;
CachingDeviceAllocator g_allocator(true);
//---------------------------------------------------------------------
// Test kernels
//---------------------------------------------------------------------
/// Generic reduction (full, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data[0], reduction_op);
}
/// Generic reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], ReductionOp &reduction_op)
{
return block_reduce.Reduce(data, reduction_op);
}
/// Generic reduction (partial, 1)
template <typename BlockReduceT, typename T, typename ReductionOp>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, ReductionOp &reduction_op, int valid_threads)
{
return block_reduce.Reduce(data, reduction_op, valid_threads);
}
/// Sum reduction (full, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[1], Sum &reduction_op)
{
return block_reduce.Sum(data[0]);
}
/// Sum reduction (full, ITEMS_PER_THREAD)
template <typename BlockReduceT, typename T, int ITEMS_PER_THREAD>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T (&data)[ITEMS_PER_THREAD], Sum &reduction_op)
{
return block_reduce.Sum(data);
}
/// Sum reduction (partial, 1)
template <typename BlockReduceT, typename T>
__device__ __forceinline__ T DeviceTest(
BlockReduceT &block_reduce, T &data, Sum &reduction_op, int valid_threads)
{
return block_reduce.Sum(data, valid_threads);
}
/**
* Test full-tile reduction kernel (where num_items is an even
* multiple of BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void FullTileReduceKernel(
T *d_in,
T *d_out,
ReductionOp reduction_op,
int tiles,
clock_t *d_elapsed)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
// Cooperative thread block reduction utility type (returns aggregate in thread 0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T data[ITEMS_PER_THREAD];
// Load first tile of data
int block_offset = 0;
if (block_offset < TILE_SIZE * tiles)
{
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperative reduce first tile
BlockReduceT block_reduce(temp_storage) ;
T block_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Loop over input tiles
while (block_offset < TILE_SIZE * tiles)
{
// TestBarrier between thread block reductions
__syncthreads();
// Load tile of data
LoadDirectBlocked(linear_tid, d_in + block_offset, data);
block_offset += TILE_SIZE;
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, data, reduction_op);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
elapsed += (start > stop) ? start - stop : stop - start;
// Reduce thread block aggregate
block_aggregate = reduction_op(block_aggregate, tile_aggregate);
}
// Store data
if (linear_tid == 0)
{
d_out[0] = block_aggregate;
*d_elapsed = elapsed;
}
}
}
/**
* Test partial-tile reduction kernel (where num_items < BLOCK_THREADS)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
__launch_bounds__ (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z)
__global__ void PartialTileReduceKernel(
T *d_in,
T *d_out,
int num_items,
ReductionOp reduction_op,
clock_t *d_elapsed)
{
// Cooperative thread block reduction utility type (returns aggregate only in thread-0)
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z> BlockReduceT;
// Allocate temp storage in shared memory
__shared__ typename BlockReduceT::TempStorage temp_storage;
int linear_tid = RowMajorTid(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
// Per-thread tile data
T partial;
// Load partial tile data
if (linear_tid < num_items)
{
partial = d_in[linear_tid];
}
// Start cycle timer
clock_t start = clock();
// Cooperatively reduce the tile's aggregate
BlockReduceT block_reduce(temp_storage) ;
T tile_aggregate = DeviceTest(block_reduce, partial, reduction_op, num_items);
// Stop cycle timer
#if CUB_PTX_ARCH == 100
// Bug: recording stop clock causes mis-write of running prefix value
clock_t stop = 0;
#else
clock_t stop = clock();
#endif // CUB_PTX_ARCH == 100
clock_t elapsed = (start > stop) ? start - stop : stop - start;
// Store data
if (linear_tid == 0)
{
d_out[0] = tile_aggregate;
*d_elapsed = elapsed;
}
}
//---------------------------------------------------------------------
// Host utility subroutines
//---------------------------------------------------------------------
/**
* Initialize problem (and solution)
*/
template <
typename T,
typename ReductionOp>
void Initialize(
GenMode gen_mode,
T *h_in,
T h_reference[1],
ReductionOp reduction_op,
int num_items)
{
for (int i = 0; i < num_items; ++i)
{
InitValue(gen_mode, h_in[i], i);
if (i == 0)
h_reference[0] = h_in[0];
else
h_reference[0] = reduction_op(h_reference[0], h_in[i]);
}
if (g_verbose)
{
printf("Input:\n");
DisplayResults(h_in, num_items);
printf("\n");
}
}
//---------------------------------------------------------------------
// Full tile test generation
//---------------------------------------------------------------------
/**
* Test full-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<true> /*sufficient_resources*/)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS * ITEMS_PER_THREAD;
int num_items = TILE_SIZE * tiles;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * num_items));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1));
// Test multi-tile (unguarded)
printf("TestFullTile %s, %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), ITEMS_PER_THREAD(%d), tiles(%d), %s (%d bytes) elements:\n",
Equals<ReductionOp, Sum>::VALUE ? "Sum" : "Max",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
ITEMS_PER_THREAD,
tiles,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
FullTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD><<<1, block_dims>>>(
d_in,
d_out,
reduction_op,
tiles,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test full-tile reduction. (Specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Test full-tile reduction.
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512),
#else
sufficient_smem = (sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024),
sufficient_threads = ((BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024),
#endif
};
TestFullTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of tests for different thread block dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
int ITEMS_PER_THREAD,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, 1, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 2, 2, ITEMS_PER_THREAD, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of tests for different thread items
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
int tiles,
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, 1, T>(gen_mode, tiles, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, 4, T>(gen_mode, tiles, reduction_op);
}
/**
* Run battery of full-tile tests for different numbers of tiles
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestFullTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (int tiles = 1; tiles < 3; tiles++)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(gen_mode, tiles, reduction_op);
}
}
//---------------------------------------------------------------------
// Partial-tile test generation
//---------------------------------------------------------------------
/**
* Test partial-tile reduction. (Specialized for sufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<true> /*sufficient_resources*/)
{
const int BLOCK_THREADS = BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z;
const int TILE_SIZE = BLOCK_THREADS;
// Allocate host arrays
T *h_in = new T[num_items];
T h_reference[1];
// Initialize problem
Initialize(gen_mode, h_in, h_reference, reduction_op, num_items);
// Initialize/clear device arrays
T *d_in = NULL;
T *d_out = NULL;
clock_t *d_elapsed = NULL;
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_elapsed, sizeof(unsigned long long)));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_in, sizeof(T) * TILE_SIZE));
CubDebugExit(g_allocator.DeviceAllocate((void**)&d_out, sizeof(T) * 1));
CubDebugExit(cudaMemcpy(d_in, h_in, sizeof(T) * num_items, cudaMemcpyHostToDevice));
CubDebugExit(cudaMemset(d_out, 0, sizeof(T) * 1));
printf("TestPartialTile %s, gen-mode %d, num_items(%d), BLOCK_THREADS(%d) (%d,%d,%d), %s (%d bytes) elements:\n",
(ALGORITHM == BLOCK_REDUCE_RAKING) ? "BLOCK_REDUCE_RAKING" : (ALGORITHM == BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY) ? "BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY" : "BLOCK_REDUCE_WARP_REDUCTIONS",
gen_mode,
num_items,
BLOCK_THREADS, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z,
typeid(T).name(),
(int) sizeof(T));
fflush(stdout);
dim3 block_dims(BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z);
PartialTileReduceKernel<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z><<<1, block_dims>>>(
d_in,
d_out,
num_items,
reduction_op,
d_elapsed);
CubDebugExit(cudaPeekAtLastError());
CubDebugExit(cudaDeviceSynchronize());
// Copy out and display results
printf("\tReduction results: ");
int compare = CompareDeviceResults(h_reference, d_out, 1, g_verbose, g_verbose);
printf("%s\n", compare ? "FAIL" : "PASS");
AssertEquals(0, compare);
printf("\tElapsed clocks: ");
DisplayDeviceResults(d_elapsed, 1);
// Cleanup
if (h_in) delete[] h_in;
if (d_in) CubDebugExit(g_allocator.DeviceFree(d_in));
if (d_out) CubDebugExit(g_allocator.DeviceFree(d_out));
if (d_elapsed) CubDebugExit(g_allocator.DeviceFree(d_elapsed));
}
/**
* Test partial-tile reduction (specialized for insufficient resources)
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op,
Int2Type<false> sufficient_resources)
{}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_DIM_X,
int BLOCK_DIM_Y,
int BLOCK_DIM_Z,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
int num_items,
ReductionOp reduction_op)
{
// Check size of smem storage for the target arch to make sure it will fit
typedef BlockReduce<T, BLOCK_DIM_X, ALGORITHM, BLOCK_DIM_Y, BLOCK_DIM_Z, TEST_ARCH> BlockReduceT;
enum
{
#if defined(SM100) || defined(SM110) || defined(SM130)
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 16 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 512,
#else
sufficient_smem = sizeof(typename BlockReduceT::TempStorage) <= 48 * 1024,
sufficient_threads = (BLOCK_DIM_X * BLOCK_DIM_Y * BLOCK_DIM_Z) <= 1024,
#endif
};
TestPartialTile<ALGORITHM, BLOCK_DIM_X, BLOCK_DIM_Y, BLOCK_DIM_Z, T>(gen_mode, num_items, reduction_op, Int2Type<sufficient_smem && sufficient_threads>());
}
/**
* Run battery of partial-tile tests for different numbers of effective threads and thread dimensions
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void TestPartialTile(
GenMode gen_mode,
ReductionOp reduction_op)
{
for (
int num_items = 1;
num_items < BLOCK_THREADS;
num_items += CUB_MAX(1, BLOCK_THREADS / 5))
{
TestPartialTile<ALGORITHM, BLOCK_THREADS, 1, 1, T>(gen_mode, num_items, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, 2, 2, T>(gen_mode, num_items, reduction_op);
}
}
//---------------------------------------------------------------------
// Main
//---------------------------------------------------------------------
/**
* Run battery of full-tile tests for different gen modes
*/
template <
BlockReduceAlgorithm ALGORITHM,
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(UNIFORM, reduction_op);
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(INTEGER_SEED, reduction_op);
if (Traits<T>::CATEGORY != FLOATING_POINT)
{
// Don't test randomly-generated floats b/c of stability
TestFullTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
TestPartialTile<ALGORITHM, BLOCK_THREADS, T>(RANDOM, reduction_op);
}
}
/**
* Run battery of tests for different block-reduction algorithmic variants
*/
template <
int BLOCK_THREADS,
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
(void)reduction_op;
#ifdef TEST_RAKING
Test<BLOCK_REDUCE_RAKING, BLOCK_THREADS, T>(reduction_op);
Test<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, BLOCK_THREADS, T>(reduction_op);
#endif
#ifdef TEST_WARP_REDUCTIONS
Test<BLOCK_REDUCE_WARP_REDUCTIONS, BLOCK_THREADS, T>(reduction_op);
#endif
}
/**
* Run battery of tests for different block sizes
*/
template <
typename T,
typename ReductionOp>
void Test(
ReductionOp reduction_op)
{
Test<7, T>(reduction_op);
Test<32, T>(reduction_op);
Test<63, T>(reduction_op);
Test<97, T>(reduction_op);
Test<128, T>(reduction_op);
Test<238, T>(reduction_op);
}
/**
* Run battery of tests for different block sizes
*/
template <typename T>
void Test()
{
Test<T>(Sum());
Test<T>(Max());
}
/**
* Main
*/
int main(int argc, char** argv)
{
// Initialize command line
CommandLineArgs args(argc, argv);
g_verbose = args.CheckCmdLineFlag("v");
args.GetCmdLineArgument("repeat", g_repeat);
// Print usage
if (args.CheckCmdLineFlag("help"))
{
printf("%s "
"[--device=<device-id>] "
"[--repeat=<repetitions of entire test suite>]"
"[--v] "
"\n", argv[0]);
exit(0);
}
// Initialize device
CubDebugExit(args.DeviceInit());
// Get ptx version
int ptx_version = 0;
CubDebugExit(PtxVersion(ptx_version));
#ifdef QUICK_TEST
// Compile/run quick tests
printf("\n full tile ------------------------\n\n");
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 4, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
TestFullTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, 1, int>(RANDOM, 1, Sum());
printf("\n partial tile ------------------------\n\n");
TestPartialTile<BLOCK_REDUCE_RAKING, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY, 128, 1, 1, int>(RANDOM, 7, Sum());
TestPartialTile<BLOCK_REDUCE_WARP_REDUCTIONS, 128, 1, 1, int>(RANDOM, 7, Sum());
#else
// Compile/run thorough tests
for (int i = 0; i <= g_repeat; ++i)
{
// primitives
Test<char>();
Test<short>();
Test<int>();
Test<long long>();
if (ptx_version > 120) // Don't check doubles on PTX120 or below because they're down-converted
Test<double>();
Test<float>();
// vector types
Test<char2>();
Test<short2>();
Test<int2>();
Test<longlong2>();
Test<char4>();
Test<short4>();
Test<int4>();
Test<longlong4>();
// Complex types
Test<TestFoo>();
Test<TestBar>();
}
#endif
return 0;
}
|
679aa73a555af53e38cdd7381e5fc426c4c76f8a.hip | // !!! This is a file automatically generated by hipify!!!
/* Execution Format : ./<exe> <drug_result_1_dict_compounds.txt> <drug_result_2_dict_compounds.txt> <drug_result_1_dict_proteins.txt> <drug_result_2_dict_proteins.txt> <para.txt> <drug name>
*/
#include <stdio.h>
#include <errno.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/dir.h>
#include <stdbool.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
//Structure for key-value pairs in dictionary
struct kvpair {
char *key;
int value;
struct kvpair *next;
};
//Structure for dictionary
typedef struct dictionary {
int size; // size of the pointer table
int n; // number of elements stored
struct kvpair **table;
}*Dictionary;
//Structure for nodes in the CUDA hashtable
typedef struct node {
char key[80];
int index;
struct node *next;
} Node;
//Function to compute hash value
unsigned long computeHash(const char *s)
{
unsigned const char *us;
unsigned long h;
h = 0;
for(us = (unsigned const char *) s; *us; us++) {
h = h * 401 + *us;
}
return h;
}
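// Example (hypothetical keyword): computeHash("aspirin") % d->size gives the bucket index used by
// insertDictionary()/searchDictionary() below; collisions are chained through kvpair->next.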
// Function to create empty dictionary
Dictionary createDictionary()
{
Dictionary d;
int i;
d = (Dictionary)malloc(sizeof(*d));
if(d==NULL){
printf("dictionary d malloc failed\n");
exit(0);
}
assert(d != 0);
d->size = 102397;
d->n = 0;
d->table = (kvpair **)malloc(sizeof(struct kvpair *) * d->size);
if(d->table==NULL){
printf("d->table malloc failed\n");
exit(0);
}
assert(d->table != 0);
for(i = 0; i < d->size; i++)
d->table[i] = 0;
return d;
}
// Function to insert a new key-value pair into the dictionary
void insertDictionary(Dictionary d, const char *key, int val)
{
struct kvpair *e;
unsigned long h;
assert(key);
assert(val);
e = (kvpair*)malloc(sizeof(*e));
if(e==NULL){
printf("e kvpair malloc failed\n");
exit(0);
}
assert(e);
e->key = strdup(key);
e->value = val;
h = computeHash(key) % d->size;
e->next = d->table[h];
d->table[h] = e;
d->n++;
return;
}
//Function to search for a key in the dictionary, returns NULL or the Node of the key if found in the dictionary
struct kvpair * searchDictionary(Dictionary d, const char *key)
{
struct kvpair *e;
for(e = d->table[computeHash(key) % d->size]; e != 0; e = e->next) {
if(!strcmp(e->key, key)) {
return e;
}
}
return NULL;
}
// Function to delete key-value pair in dictionary --this is currently not used
void deleteDictionary(Dictionary d, const char *key)
{
struct kvpair **prev;
struct kvpair *e;
for(prev = &(d->table[computeHash(key) % d->size]);
*prev != 0;
prev = &((*prev)->next)) {
if(!strcmp((*prev)->key, key)) {
e = *prev;
*prev = e->next;
free(e->key);
free(e);
return;
}
}
}
//Function to free dictionary
void destroyDictionary(Dictionary d)
{
int i;
struct kvpair *e;
struct kvpair *next;
for(i = 0; i < d->size; i++) {
for(e = d->table[i]; e != 0; e = next) {
next = e->next;
free(e->key);
free(e);
}
}
free(d->table);
free(d);
return;
}
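// This dictionary is used only on the host, to deduplicate and count keywords from the first
// input file before they are flattened into the fixed-size Node array shipped to the GPU.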
//Function to compute normal distribution of a value, equivalent to Python's CDF.norm from NVIDIA CUDA samples
__device__ float CND(float d)
{
const double A1 = 0.31938153;
const double A2 = -0.356563782;
const double A3 = 1.781477937;
const double A4 = -1.821255978;
const double A5 = 1.330274429;
const double RSQRT2PI = 0.39894228040143267793994605993438;
double
K = 1.0 / (1.0 + 0.2316419 * fabs(d));
double
cnd = RSQRT2PI * exp(- 0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
if (d > 0)
cnd = 1.0 - cnd;
return cnd;
}
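// CND(z) approximates the standard normal CDF Phi(z) (e.g. CND(0) ~= 0.5), so deviceZP reports
// the one-sided P-value as 1 - CND(z_score).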
//Function to remove specific characters from input string, used to remove carriage returns
void removeChar(char *str, char garbage) {
char *src, *dst;
for (src = dst = str; *src != '\0'; src++) {
*dst = *src;
if (*dst != garbage) dst++;
}
*dst = '\0';
return;
}
//Function used by qsort to sort the records based on number of tokens
int sort(const void* a, const void* b)
{
char *ia = strdup(*(const char **)a);
char *ib = strdup(*(const char **)b);
char *split1, *saveptr, *saveptr1;
split1 = strtok_r(ia, ";", &saveptr);
split1 = strtok_r(NULL, ";", &saveptr);
int x = atoi(split1);
split1 = strtok_r(ib, ";", &saveptr1);
split1 = strtok_r(NULL, ";", &saveptr1);
int y = atoi(split1);
free(ia);
free(ib);
return (x - y);
}
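// The comparator assumes each record is formatted as "<tokens>;<token count>"; qsort() therefore
// orders inp2_list by ascending token count before the count field is stripped off in main().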
/*Kernel function performs random sampling
It is designed so that one block performs one sampling and every thread processes one record. If the number of records to be processed exceeds 1024, some threads take more than one stride, i.e. they process more than one record during one sampling.
Stages in the kernel function are:
1) Build the dictionary 's_hashtab' in shared memory for O(1) keyword lookup during sampling; the first thread of each block links all keys in 'd_r1_dict_keys' into the hash table (or copies the prebuilt table from global memory on relaunch).
2) Generate random record indices. Each thread initializes its own hiprand state and draws one random index per stride, so no host-side random number generation is needed.
3) Initialize shared-memory counters for sampling; a counter is incremented whenever its keyword is found in a sampled record.
4) Sampling: every block performs one sampling and every thread processes at least one record, extracting the tokens of the record and looking each one up in the dictionary; on a hit the corresponding shared counter is incremented atomically.
5) Copy the counters from shared memory to global memory for the Z-score and P-value calculation.
Note: Shared memory s_hashtab - is the hash table in the shared memory.
Shared memory s_r1_value_list - is the value list in the shared memory.
Arguments passed to kernel function:
* d_r2_str - the list of records used for sampling.
* d_r1_dict_value - global counters produced by sampling.
* sampleTimes - number of samplings.
* sampleSize - size of each sample.
* randomRange - maximum value of each random index.
* r1_dict_cnt - number of keywords in dictionary 1.
* d_r1_dict_keys - keywords of dictionary 1, used to populate the hash table in the kernel.
* d_hashtab - global memory copy of the hash table (reused on relaunch).
* sampleStrides - maximum number of strides each thread takes while sampling.
* threadCount - number of threads per block.
* samplesCompleted - number of samplings completed before this kernel launch.
* relaunch - whether the kernel is launched for the first time or relaunched.
*/
__global__ void deviceDDI(char * d_r2_str, int * d_r1_dict_value, int sampleTimes, int sampleSize, int randomRange, int r1_dict_cnt, Node *d_r1_dict_keys, Node **d_hashtab, int sampleStrides, int threadCount, int samplesCompleted, bool relaunch){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j, k, x = 0, ind1, random;
unsigned hashval;
char str_split[80];
extern __shared__ int s_r1_value_list[];
__shared__ Node *s_hashtab[5003];
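// Note: s_hashtab alone needs 5003 pointers (~40 KB on 64-bit devices) of static shared memory,
// on top of the dynamically allocated r1_dict_cnt ints for s_r1_value_list, so r1_dict_cnt is
// effectively limited by the per-block shared-memory budget.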
//s_r1_value_list holds one counter per dictionary keyword;
//a counter is incremented whenever a sampled record contains that keyword.
//Build the dictionary
if(threadIdx.x==0){
if(!relaunch){
//Initialize the hash table
for(j=0;j<5003;j++){
s_hashtab[j] = 0;
}
j=0;
k=0;
//Build the dictionary
for(j=0;j<r1_dict_cnt;j++){
x=0;
hashval = 0;
while(d_r1_dict_keys[j].key[x]!='\0'){
hashval = ((int)d_r1_dict_keys[j].key[x++] + 401*hashval)%5003;
}
d_r1_dict_keys[j].next = (s_hashtab[hashval]==0?NULL:s_hashtab[hashval]);
s_hashtab[hashval] = &d_r1_dict_keys[j];
}
for(j=0;j<5003;j++){
d_hashtab[j] = s_hashtab[j];
}
}
else{
for(j=0;j<5003;j++){
s_hashtab[j] = d_hashtab[j];
}
}
//Initialize the shared memory
for(j=0;j<(r1_dict_cnt);j++){
s_r1_value_list[j] = 0;
}
}
__syncthreads();
//Generate the random numbers
hiprandState_t state;
hiprand_init(clock64(), i, 0, &state);
//Sampling
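// Each thread draws up to sampleStrides random record indices with hiprand (sampling with
// replacement); the condition threadIdx.x + j*threadCount < sampleSize ensures the block as a
// whole processes exactly sampleSize records per sampling.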
for(j=0;(j<sampleStrides)&&((threadIdx.x+(j*threadCount))<sampleSize);j++){
//char str_split[80];
x=0;
hashval = 0;
random = hiprand(&state)%randomRange;
for(k=random*1000;k<random*1000+1000;k++){
if(d_r2_str[k] == '^')
break;
if(d_r2_str[k] != '~'){
str_split[x++] = d_r2_str[k];
hashval = ((int)d_r2_str[k] + 401*hashval)%5003;
}
else{
//str_split[x] = '\0';
Node *np = s_hashtab[hashval];
//np = s_hashtab[hashval];
while((np!= NULL)&&(np!=0)){
ind1 = 0;
while((np->key[ind1] != '\0')&&(ind1<x)){
if(np->key[ind1] == str_split[ind1])
ind1++;
else
break;
}
if((np->key[ind1] == '\0')&&(ind1==x)){
atomicAdd(&s_r1_value_list[(np->index)],1);
break;
}
if(np->next == NULL||np->next==0)
break;
np = np->next;
}
x=0;
hashval = 0;
}
}
}
__syncthreads();
//Copy to global memory from shared memory
if(threadIdx.x==0){
for(j=0;j<(r1_dict_cnt);j++){
d_r1_dict_value[(j)*(sampleTimes+1)+blockIdx.x+1+samplesCompleted] = s_r1_value_list[j];
}
}
}
/* deviceZP: Function to calculate Z-scores and P-values
One thread operates on one keyword's row to compute its Z-score and P-value.
Arguments passed to the kernel function:
* d_r1_dict_value - counters laid out row-major with (sampleTimes+1) entries per keyword: column 0 holds the observed count, columns 1..sampleTimes hold the sampled counts.
* d_z_score - array to hold Z-scores.
* d_p_value - array to hold P-values.
*/
__global__ void deviceZP(int * d_r1_dict_value, int sampleTimes,int r1_dict_cnt, float * d_z_score, float * d_p_value){
int x, j, i = blockDim.x * blockIdx.x + threadIdx.x;
float mean =0,sd;
if(i<r1_dict_cnt){
x = 0;
sd = 0;
for(j=1;j<=sampleTimes;j++){
x += d_r1_dict_value[(i*(sampleTimes+1))+j];
}
mean = (float)x/sampleTimes;
for(j=1;j<=sampleTimes;j++){
sd += (d_r1_dict_value[(i*(sampleTimes+1))+j]-mean)*(d_r1_dict_value[(i*(sampleTimes+1))+j]-mean);
}
sd = sqrt(sd/(sampleTimes));
if(fabs(sd)>pow(10.0,-7))
d_z_score[i] = (d_r1_dict_value[i*(sampleTimes+1)] - mean)/sd;
else{
if(d_r1_dict_value[i*(sampleTimes+1)] != (int)mean)
d_z_score[i] = d_r1_dict_value[i*(sampleTimes+1)]*100;
else
d_z_score[i] = -100;
}
d_p_value[i] = 1-CND(d_z_score[i]);
}
}
//Function to partition records while sorting based on Z-score, called by quickSort
int partition( float a[], int index[], int l, int r) {
int i, j, t;
float temp;
float pivot = a[l];
i = l;
j = r+1;
while( 1)
{
do ++i;
while( a[i] >= pivot && i <= r );
do --j;
while( a[j] < pivot );
if( i >= j )
break;
temp = a[i];
a[i] = a[j];
a[j] = temp;
t = index[i];
index[i] = index[j];
index[j] = t;
}
temp = a[l];
a[l] = a[j];
a[j] = temp;
t = index[l];
index[l] = index[j];
index[j] = t;
return j;
}
//Function to quicksort the records based on Z-score
void quickSort(float a[], int index[], int l, int r)
{
int j;
if( l < r )
{
j = partition( a, index, l, r);
quickSort( a, index, l, j-1);
quickSort( a, index, j+1, r);
}
return;
}
int main(int argc, char *argv[])
{
if(argc!=7){
printf("\nIncorrect arguments passed, Please pass <Compounds with interactions>, <Compounds without interactions>, <Proteins with interactions>, <Proteins without interactions>, <PMID Substances>, <para.txt>, <Drug Name> as arguments\n");
exit(1);
}
FILE *inp_r1, *inp_r2, *inp_para, *op1, *op2;
char *split0,*split1, *saveptr, *saveptr1, *saveptr2, *inp2_list[100000];
char filename1[100], filename2[100], cutoffstr[20], pvaluestr[20], str1[10000], rmode[2] = "r";
size_t len = 0;
Dictionary d_cinp1;
int cutoff, sampleTimes, i=0, j=0, k=0, r1_cnt, r2_cnt, r1_dict_cnt, threadCount, sampleStrides;
float p_value, elapsedTime, totalTime=0;
hipEvent_t start, stop;
printf("Drug name = %s\n",argv[6]);
printf("Read input files\n");
d_cinp1 = createDictionary();
//Read the parameters from para.txt - 4th argument
inp_para = fopen(argv[5],rmode);
if (inp_para == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[5]);
exit(1);
}
while(1)
{
fscanf(inp_para,"%[^\n]%*c", str1);
if(feof(inp_para)) break;
split0 = strtok_r(str1, "\t", &saveptr);
split1 = strtok_r(NULL, "\t", &saveptr);
removeChar(split0,'\r');
removeChar(split1,'\r');
if( strcmp(split0,"sampleTimes") == 0)
{
char temp[20];
strcpy(temp, split1);
sampleTimes = atoi(temp);
}
else if( strcmp(split0,"cutoff") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(cutoffstr,temp);
cutoff = atoi(temp);
}
else if( strcmp(split0,"p_value") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(pvaluestr,temp);
p_value = atof(temp);
}
}
fclose(inp_para);
printf("Number of Samples = %d\n",sampleTimes);
if(sampleTimes <=0){
printf("Incorrect number of samples specified = %d, value of atleast 1 is expected\n", sampleTimes);
exit(0);
}
// Reading the dictionary of compounds of result 1 - 1st argument
// Create and populate dictionary 'd_cinp1' while reading the records
inp_r1 = fopen(argv[1], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[1]);
exit(1);
}
r1_cnt = 0;
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
r1_cnt++;
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("malloc to newstr failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct kvpair * e = searchDictionary(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
insertDictionary(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 2nd argument
inp_r2 = fopen(argv[2], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[2]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("malloc to inp2_list[r2_cnt] failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
printf("Sample size = %d\n", r1_cnt);
//Sort inp2_list based on the number of tokens or length
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
hipSetDevice(0);
//populate value list for dictionary 1
hipError_t err = hipSuccess;
int * r1_dict_value;
//pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Device value list for dictionary 1
int *d_r1_dict_value;
Node * r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("malloc to r1_dict_keys failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
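// r1_dict_keys[j].index = j ties each hash-table node back to row j of r1_dict_value, whose
// column 0 holds the observed keyword count; columns 1..sampleTimes are filled in by deviceDDI.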
destroyDictionary(d_cinp1);
//Strip off the number of tokens from every record in list 2.
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr1);
}
//Process the records for shipping to kernel
char * temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1==NULL){
printf("temp1 malloc failed\n");
exit(0);
}
char * d_r2_str;
j=0;
for(i=0;i<r2_cnt;i++){
for(k=0;k<1000;k++){
if(k<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][k];
}
else
temp1[j++] = '^';
}
free(inp2_list[i]);
}
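// Records are packed into fixed 1000-character slots: characters up to the record length are
// copied verbatim (tokens separated by '~') and the remainder of each slot is padded with '^',
// so the kernel can address record k at offset k*1000 in d_r2_str.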
//char* d_r1_dict_list;
//Allocate global memory for dictionary 1 keywords
/*err = hipMalloc((void **)&d_r1_dict_list,80*sizeof(char)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_list (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_list,r1_dict_list,80*sizeof(char)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_list (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}*/
//Allocate global memory for input list 2 records
err = hipMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for dictionary 1 value list
err = hipMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Node * d_r1_dict_keys = NULL;
err = hipMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Node **hashtab;
Node **d_hashtab;
err = hipMalloc((void **)&d_hashtab,sizeof(Node*)*5003);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_hashtab (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hashtab = (Node**)malloc(sizeof(Node*)*5003);
if(hashtab==NULL){
printf("hashtab malloc failed\n");
exit(0);
}
printf("Sampling for compounds begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread needs to take for sampling
sampleStrides = (int)ceil((float)r1_cnt/threadCount);
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( deviceDDI), dim3((sampleTimes-i)>256?256:(sampleTimes-i)), dim3(threadCount), (sizeof(int)*(r1_dict_cnt)), 0, d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, d_hashtab, sampleStrides, threadCount,i,(i==0)?false:true);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
err = hipDeviceSynchronize();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipGetLastError();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
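// The device-to-host and host-to-device round trips below re-copy the same buffers between
// kernel relaunches; device memory already persists across launches, so these appear to be
// defensive (and likely redundant) copies rather than required synchronization.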
err = hipMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(r1_dict_keys,d_r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(hashtab,d_hashtab,sizeof(Node*)*5003,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_hashtab to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_hashtab,hashtab,sizeof(Node*)*5003,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_hashtab (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling for compounds completed\n");
err = hipFree(d_r2_str);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_r1_dict_keys);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
float *d_z_score;
float *z_score_arr;
//Allocate array for Z-score, pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_z_score,sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_z_score (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_p_value;
float *p_value_arr;
//Allocate array for P-value, pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score and P-value calculation for Compounds begin\n");
printf("Kernel deviceZP launched with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0), 256);
totalTime += elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( deviceZP), dim3(ceil(r1_dict_cnt/256.0)), dim3(256), 0, 0, d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score, d_p_value);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
err = hipDeviceSynchronize();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipGetLastError();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation completed\n");
err = hipMemcpy(z_score_arr,d_z_score,sizeof(float)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_r1_dict_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_p_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_z_score);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize the sortedIndex array, as sortedIndex will have the values sorted with quickSort based on descending order of Z-score
//After sorting sortedIndex contains the new index of Z-score.
int * sortedIndex = (int*) malloc(sizeof(int)*r1_dict_cnt);
if(sortedIndex == NULL){
printf("malloc error for sortedIndex\n");
}
for(i=0;i<r1_dict_cnt;i++){
sortedIndex[i] = i;
}
quickSort(z_score_arr, sortedIndex, 0, r1_dict_cnt-1);
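//In the loop below r1_dict_keys, r1_dict_value and p_value_arr are indexed through sortedIndex[i],
//while z_score_arr is already in descending sorted order and is indexed by i directly.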
printf("Write extracted compounds to output files\n");
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcat(filename1,".txt");
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
//j == sampleTimes here, so the final value printed is the last sampled count of this row
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Compounds output files written\n");
free(r1_dict_keys);
err = hipHostFree(p_value_arr);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipHostFree(z_score_arr);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipHostFree(r1_dict_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(sortedIndex);
d_cinp1 = createDictionary();
printf("Processing proteins\n");
printf("Read input files\n");
// Reading the dictionary of proteins of result 1 - 3rd argument
inp_r1 = fopen(argv[3], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[3]);
exit(1);
}
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("newstr malloc failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct kvpair * e = searchDictionary(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
insertDictionary(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 4th argument
inp_r2 = fopen(argv[4], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[4]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("inp2_list[r2_cnt] malloc failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
//Sort inp2_list based on the number of tokens
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
//pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Process the input data for shipping
free(r1_dict_keys);
r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("r1_dict_keys malloc failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
destroyDictionary(d_cinp1);
//Strip off the number of tokens from list 2 records
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr2);
}
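//Pack the list-2 records into fixed 1000-character slots: '~' separates tokens within a record
//and '^' pads the unused tail of each slot (the sampling kernel stops at '^').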
temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1 == NULL){
printf("temp1 malloc failed\n");
exit(0);
}
j=0;
for(i=0;i<r2_cnt;i++){
for(k=0;k<1000;k++){
if(k<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][k];
}
else
temp1[j++] = '^';
}
free(inp2_list[i]);
}
err = hipMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for list 2 records
err = hipMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for index of dictionary 1 value list
err = hipMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Sampling for proteins begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread needs to take for sampling
sampleStrides = (r1_cnt + threadCount - 1)/threadCount; //integer ceiling of r1_cnt/threadCount, so the remainder records are also covered
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
totalTime += elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( deviceDDI), dim3((sampleTimes-i)>256?256:(sampleTimes-i)), dim3(threadCount), (sizeof(int)*(r1_dict_cnt)), 0, d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, d_hashtab, sampleStrides, threadCount,i,(i==0)?false:true);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
err = hipDeviceSynchronize();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipGetLastError();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(r1_dict_keys,d_r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(hashtab,d_hashtab,sizeof(Node*)*5003,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_hashtab to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_hashtab,hashtab,sizeof(Node*)*5003,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_hashtab (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,hipMemcpyHostToDevice);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling completed\n");
err = hipFree(d_r1_dict_keys);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_r2_str);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
//Allocate Z-score array pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//float * d_z_score_p;
err = hipMalloc((void **)&d_z_score,sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_z_score (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate P-value array pinned memory for optimized usage of memory transfer bandwidth
err = hipHostMalloc((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != hipSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins begin\n");
printf("Kernel deviceZP launch with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0),256);
totalTime += elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord( start, 0 );
hipLaunchKernelGGL(( deviceZP), dim3(ceil(r1_dict_cnt/256.0)), dim3(256), 0, 0, d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score, d_p_value);
hipEventRecord( stop, 0 );
hipEventSynchronize( stop );
hipEventElapsedTime( &elapsedTime, start, stop );
hipEventDestroy( start );
hipEventDestroy( stop );
err = hipDeviceSynchronize();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipGetLastError();
if(err != hipSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins completed\n");
printf( "\n******** Total Running Time of Kernel = %0.5f seconds ******* \n", (elapsedTime+totalTime)/1000);
printf("Copy output data to host memory\n");
err = hipMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_r1_dict_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_p_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(z_score_arr,d_z_score,sizeof(float)*r1_dict_cnt,hipMemcpyDeviceToHost);
if(err != hipSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_z_score);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize sortedIndex, this will hold correct index of the dictionary 1 records after sorting based on descending order of Z-score
sortedIndex = (int*)malloc(sizeof(int)*r1_dict_cnt);
if(sortedIndex == NULL){
printf("sortedIndex malloc error\n");
}
for(i=0;i<r1_dict_cnt;i++){
sortedIndex[i] = i;
}
//Sort the array based on descending order of Z-score
quickSort(z_score_arr, sortedIndex, 0, r1_dict_cnt-1);
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcat(filename1,".txt");
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
printf("Write extracted proteins to output files\n");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
//j == sampleTimes here, so the final value printed is the last sampled count of this row
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Processing completed\n");
free(r1_dict_keys);
err = hipHostFree(p_value_arr);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipHostFree(z_score_arr);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipHostFree(r1_dict_value);
if(err != hipSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(sortedIndex);
free(hashtab);
err = hipFree(d_hashtab);
if(err != hipSuccess){
fprintf(stderr,"Failed to free from device d_hashtab (error code %s) !\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
//End of program | 679aa73a555af53e38cdd7381e5fc426c4c76f8a.cu |
/* Execution Format : ./<exe> <drug_result_1_dict_compounds.txt> <drug_result_2_dict_compounds.txt> <drug_result_1_dict_proteins.txt> <drug_result_2_dict_proteins.txt> <para.txt> <drug name>
*/
#include <stdio.h>
#include <errno.h>
#include <math.h>
#include <string.h>
#include <unistd.h>
#include <stdlib.h>
#include <assert.h>
#include <sys/dir.h>
#include <stdbool.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <curand.h>
#include <curand_kernel.h>
//Structure for key-value pairs in dictionary
struct kvpair {
char *key;
int value;
struct kvpair *next;
};
//Structure for dictionary
typedef struct dictionary {
int size; // size of the pointer table
int n; // number of elements stored
struct kvpair **table;
}*Dictionary;
//Structure for nodes in the CUDA hashtable
typedef struct node {
char key[80];
int index;
struct node *next;
} Node;
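//The host-side Dictionary below (chained kvpair buckets) is flattened into an array of Node entries
//before being copied to the GPU, where the deviceDDI kernel re-links the Nodes into the 5003-bucket
//hash table d_hashtab / s_hashtab.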
//Function to compute hash value
unsigned long computeHash(const char *s)
{
unsigned const char *us;
unsigned long h;
h = 0;
for(us = (unsigned const char *) s; *us; us++) {
h = h * 401 + *us;
}
return h;
}
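//Note: the deviceDDI kernel recomputes the same 401-multiplier rolling hash on the device,
//but reduces it modulo 5003 (the device hash table size) instead of modulo the host dictionary size.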
// Function to create empty dictionary
Dictionary createDictionary()
{
Dictionary d;
int i;
d = (Dictionary)malloc(sizeof(*d));
if(d==NULL){
printf("dictionary d malloc failed\n");
exit(0);
}
assert(d != 0);
d->size = 102397;
d->n = 0;
d->table = (kvpair **)malloc(sizeof(struct kvpair *) * d->size);
if(d->table==NULL){
printf("d->table malloc failed\n");
exit(0);
}
assert(d->table != 0);
for(i = 0; i < d->size; i++)
d->table[i] = 0;
return d;
}
// Function to insert a new key-value pair into the dictionary
void insertDictionary(Dictionary d, const char *key, int val)
{
struct kvpair *e;
unsigned long h;
assert(key);
assert(val);
e = (kvpair*)malloc(sizeof(*e));
if(e==NULL){
printf("e kvpair malloc failed\n");
exit(0);
}
assert(e);
e->key = strdup(key);
e->value = val;
h = computeHash(key) % d->size;
e->next = d->table[h];
d->table[h] = e;
d->n++;
return;
}
//Function to search for a key in the dictionary, returns NULL or the Node of the key if found in the dictionary
struct kvpair * searchDictionary(Dictionary d, const char *key)
{
struct kvpair *e;
for(e = d->table[computeHash(key) % d->size]; e != 0; e = e->next) {
if(!strcmp(e->key, key)) {
return e;
}
}
return NULL;
}
// Function to delete key-value pair in dictionary --this is currently not used
void deleteDictionary(Dictionary d, const char *key)
{
struct kvpair **prev;
struct kvpair *e;
for(prev = &(d->table[computeHash(key) % d->size]);
*prev != 0;
prev = &((*prev)->next)) {
if(!strcmp((*prev)->key, key)) {
e = *prev;
*prev = e->next;
free(e->key);
free(e);
return;
}
}
}
//Function to free dictionary
void destroyDictionary(Dictionary d)
{
int i;
struct kvpair *e;
struct kvpair *next;
for(i = 0; i < d->size; i++) {
for(e = d->table[i]; e != 0; e = next) {
next = e->next;
free(e->key);
free(e);
}
}
free(d->table);
free(d);
return;
}
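/* Illustrative only (not called by this program): a minimal sketch of how the dictionary helpers
   above fit together, assuming the definitions in this file.
       Dictionary d = createDictionary();
       insertDictionary(d, "aspirin", 1);
       struct kvpair *hit = searchDictionary(d, "aspirin");
       if (hit != NULL) hit->value++;   // bump the co-occurrence count on a repeat hit
       destroyDictionary(d);
*/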
//Function to compute the cumulative distribution function (CDF) of the standard normal for a value, equivalent to scipy.stats.norm.cdf, from the NVIDIA CUDA samples
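//The polynomial constants below appear to follow the classic Abramowitz & Stegun approximation
//of the normal CDF (absolute error on the order of 1e-7).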
__device__ float CND(float d)
{
const double A1 = 0.31938153;
const double A2 = -0.356563782;
const double A3 = 1.781477937;
const double A4 = -1.821255978;
const double A5 = 1.330274429;
const double RSQRT2PI = 0.39894228040143267793994605993438;
double
K = 1.0 / (1.0 + 0.2316419 * fabs(d));
double
cnd = RSQRT2PI * exp(- 0.5 * d * d) *
(K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
if (d > 0)
cnd = 1.0 - cnd;
return cnd;
}
//Function to remove specific characters from input string, used to remove carriage returns
void removeChar(char *str, char garbage) {
char *src, *dst;
for (src = dst = str; *src != '\0'; src++) {
*dst = *src;
if (*dst != garbage) dst++;
}
*dst = '\0';
return;
}
//Function used by qsort to sort the records based on number of tokens
int sort(const void* a, const void* b)
{
char *ia = strdup(*(const char **)a);
char *ib = strdup(*(const char **)b);
char *split1, *saveptr, *saveptr1;
split1 = strtok_r(ia, ";", &saveptr);
split1 = strtok_r(NULL, ";", &saveptr);
int x = atoi(split1);
split1 = strtok_r(ib, ";", &saveptr1);
split1 = strtok_r(NULL, ";", &saveptr1);
int y = atoi(split1);
free(ia); //free the strdup'd copies so repeated comparisons during qsort do not leak memory
free(ib);
return (x-y);
}
/*Kernel function performs random sampling
It is designed in this way: one block does one sampling and every thread processes one record. If the number of records to be processed exceeds 1024, some threads take more than one stride, i.e. some threads process more than one record during one sampling.
Stages in kernel function are:
1) Build the dictionary 's_hashtab' in shared memory for O(1)-time keyword lookup while sampling; the first thread of the block links all the keys in 'd_r1_dict_keys' into the hash table.
2) Initialize the random number generator: every thread sets up its own cuRAND state and draws one random record index for each record it processes during sampling; when more random indices are required than there are threads, threads take extra strides and draw one index per stride.
3) Shared memory initialization for sampling. This is required, as during sampling if keyword is found then we increment the count.
4) Sampling: every block performs one sampling and every thread processes at least one record, extracting the keywords/tokens of the record and looking each one up in the dictionary; on a hit the corresponding shared-memory counter is incremented.
5) Copy data to global memory from shared memory for Z-score and P-value calculation.
Note: Shared memory s_hashtab - is the hash table in the shared memory.
Shared memory s_r1_value_list - is the value list in the shared memory.
Arguments passed to kernel function:
* d_r2_str - is the list of records to be used for sampling.
* d_r1_dict_value - Global values for vector produced from sampling.
* sampleTimes - number of samples.
* sampleSize - size of the sample.
* randomRange - maximum value of each random number.
* r1_dict_cnt - number of keywords in dictionary 1.
* d_r1_dict_keys - keywords of dictionary 1, to populate hashtable in kernel function.
* d_hashtab - global memory hash table.
* sampleStrides - maximum number of strides every thread will take for sampling.
* threadCount - number of threads per block.
* samplesCompleted - number of samplings completed before this kernel launch.
* relaunch - to decide whether kernel is launched for the first time or relaunched.
*/
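/* Per-block shared memory: s_hashtab holds 5003 Node pointers (about 40 KB on 64-bit devices) and
   s_r1_value_list is the dynamically sized array of r1_dict_cnt ints supplied at launch, so the
   dictionary must be small enough for both to fit within the GPU's per-block shared-memory limit. */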
__global__ void deviceDDI(char * d_r2_str, int * d_r1_dict_value, int sampleTimes, int sampleSize, int randomRange, int r1_dict_cnt, Node *d_r1_dict_keys, Node **d_hashtab, int sampleStrides, int threadCount, int samplesCompleted, bool relaunch){
int i = blockDim.x * blockIdx.x + threadIdx.x;
int j, k, x = 0, ind1, random;
unsigned hashval;
char str_split[80];
extern __shared__ int s_r1_value_list[];
__shared__ Node *s_hashtab[5003];
//s_r1_value_list holds one counter per dictionary keyword;
//it is incremented whenever a sampled record contains that keyword.
//Build the dictionary
if(threadIdx.x==0){
if(!relaunch){
//Initialize the hash table
for(j=0;j<5003;j++){
s_hashtab[j] = 0;
}
j=0;
k=0;
//Build the dictionary
for(j=0;j<r1_dict_cnt;j++){
x=0;
hashval = 0;
while(d_r1_dict_keys[j].key[x]!='\0'){
hashval = ((int)d_r1_dict_keys[j].key[x++] + 401*hashval)%5003;
}
d_r1_dict_keys[j].next = (s_hashtab[hashval]==0?NULL:s_hashtab[hashval]);
s_hashtab[hashval] = &d_r1_dict_keys[j];
}
for(j=0;j<5003;j++){
d_hashtab[j] = s_hashtab[j];
}
}
else{
for(j=0;j<5003;j++){
s_hashtab[j] = d_hashtab[j];
}
}
//Initialize the shared memory
for(j=0;j<(r1_dict_cnt);j++){
s_r1_value_list[j] = 0;
}
}
__syncthreads();
//Initialize the per-thread random number generator state
curandState_t state;
curand_init(clock64(), i, 0, &state);
//Sampling
for(j=0;(j<sampleStrides)&&((threadIdx.x+(j*threadCount))<sampleSize);j++){
//char str_split[80];
x=0;
hashval = 0;
random = curand(&state)%randomRange;
for(k=random*1000;k<random*1000+1000;k++){
if(d_r2_str[k] == '^')
break;
if(d_r2_str[k] != '~'){
str_split[x++] = d_r2_str[k];
hashval = ((int)d_r2_str[k] + 401*hashval)%5003;
}
else{
//str_split[x] = '\0';
Node *np = s_hashtab[hashval];
//np = s_hashtab[hashval];
while((np!= NULL)&&(np!=0)){
ind1 = 0;
while((np->key[ind1] != '\0')&&(ind1<x)){
if(np->key[ind1] == str_split[ind1])
ind1++;
else
break;
}
if((np->key[ind1] == '\0')&&(ind1==x)){
atomicAdd(&s_r1_value_list[(np->index)],1);
break;
}
if(np->next == NULL||np->next==0)
break;
np = np->next;
}
x=0;
hashval = 0;
}
}
}
__syncthreads();
//Copy to global memory from shared memory
if(threadIdx.x==0){
for(j=0;j<(r1_dict_cnt);j++){
d_r1_dict_value[(j)*(sampleTimes+1)+blockIdx.x+1+samplesCompleted] = s_r1_value_list[j];
}
}
}
/* deviceZP: Function to calculate the Z-score and P-value of every keyword
One thread operates on one array to compute Z-score and P-value
Arguments passed to the kernel function:
* d_r1_dict_value - per keyword, the observed count followed by the sampleTimes sampled counts.
* sampleTimes - number of samples drawn per keyword.
* r1_dict_cnt - number of keywords (one thread handles one keyword).
* d_z_score - array to hold Z-scores.
* d_p_value - array to hold P-values.
*/
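/* For keyword i, with observed count o = d_r1_dict_value[i*(sampleTimes+1)] and samples s_1..s_N
   at offsets 1..sampleTimes (N = sampleTimes), the kernel computes
       mean = (1/N) * sum(s_j)
       sd   = sqrt((1/N) * sum((s_j - mean)^2))        (population standard deviation)
       z    = (o - mean) / sd,   p = 1 - CND(z)
   with a fallback value for z when sd is numerically zero. */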
__global__ void deviceZP(int * d_r1_dict_value, int sampleTimes,int r1_dict_cnt, float * d_z_score, float * d_p_value){
int x, j, i = blockDim.x * blockIdx.x + threadIdx.x;
float mean =0,sd;
if(i<r1_dict_cnt){
x = 0;
sd = 0;
for(j=1;j<=sampleTimes;j++){
x += d_r1_dict_value[(i*(sampleTimes+1))+j];
}
mean = (float)x/sampleTimes; //cast avoids integer truncation of the sample mean
for(j=1;j<=sampleTimes;j++){
sd += (d_r1_dict_value[(i*(sampleTimes+1))+j]-mean)*(d_r1_dict_value[(i*(sampleTimes+1))+j]-mean);
}
sd = sqrt(sd/(sampleTimes));
if(fabs(sd)>pow(10.0,-7))
d_z_score[i] = (d_r1_dict_value[i*(sampleTimes+1)] - mean)/sd;
else{
if(d_r1_dict_value[i*(sampleTimes+1)] != (int)mean)
d_z_score[i] = d_r1_dict_value[i*(sampleTimes+1)]*100;
else
d_z_score[i] = -100;
}
d_p_value[i] = 1-CND(d_z_score[i]);
}
}
//Function to partition records while sorting based on Z-score, called by quickSort
int partition( float a[], int index[], int l, int r) {
int i, j, t;
float temp;
float pivot = a[l];
i = l;
j = r+1;
while( 1)
{
do ++i;
while( a[i] >= pivot && i <= r );
do --j;
while( a[j] < pivot );
if( i >= j )
break;
temp = a[i];
a[i] = a[j];
a[j] = temp;
t = index[i];
index[i] = index[j];
index[j] = t;
}
temp = a[l];
a[l] = a[j];
a[j] = temp;
t = index[l];
index[l] = index[j];
index[j] = t;
return j;
}
//Function to quicksort the records based on Z-score
void quickSort(float a[], int index[], int l, int r)
{
int j;
if( l < r )
{
j = partition( a, index, l, r);
quickSort( a, index, l, j-1);
quickSort( a, index, j+1, r);
}
return;
}
int main(int argc, char *argv[])
{
if(argc!=7){
printf("\nIncorrect arguments passed, Please pass <Compounds with interactions>, <Compounds without interactions>, <Proteins with interactions>, <Proteins without interactions>, <PMID Substances>, <para.txt>, <Drug Name> as arguments\n");
exit(1);
}
FILE *inp_r1, *inp_r2, *inp_para, *op1, *op2;
char *split0,*split1, *saveptr, *saveptr1, *saveptr2, *inp2_list[100000];
char filename1[100], filename2[100], cutoffstr[20], pvaluestr[20], str1[10000], rmode[2] = "r";
size_t len = 0;
Dictionary d_cinp1;
int cutoff, sampleTimes, i=0, j=0, k=0, r1_cnt, r2_cnt, r1_dict_cnt, threadCount, sampleStrides;
float p_value, elapsedTime, totalTime=0;
cudaEvent_t start, stop;
printf("Drug name = %s\n",argv[6]);
printf("Read input files\n");
d_cinp1 = createDictionary();
//Read the parameters from para.txt - 4th argument
inp_para = fopen(argv[5],rmode);
if (inp_para == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[5]);
exit(1);
}
while(1)
{
fscanf(inp_para,"%[^\n]%*c", str1);
if(feof(inp_para)) break;
split0 = strtok_r(str1, "\t", &saveptr);
split1 = strtok_r(NULL, "\t", &saveptr);
removeChar(split0,'\r');
removeChar(split1,'\r');
if( strcmp(split0,"sampleTimes") == 0)
{
char temp[20];
strcpy(temp, split1);
sampleTimes = atoi(temp);
}
else if( strcmp(split0,"cutoff") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(cutoffstr,temp);
cutoff = atoi(temp);
}
else if( strcmp(split0,"p_value") == 0)
{
char temp[20];
strcpy(temp, split1);
strcpy(pvaluestr,temp);
p_value = atof(temp);
}
}
fclose(inp_para);
printf("Number of Samples = %d\n",sampleTimes);
if(sampleTimes <=0){
printf("Incorrect number of samples specified = %d, value of atleast 1 is expected\n", sampleTimes);
exit(0);
}
// Reading the dictionary of compounds of result 1 - 1st argument
// Create and populate dictionary 'd_cinp1' while reading the records
inp_r1 = fopen(argv[1], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[1]);
exit(1);
}
r1_cnt = 0;
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
r1_cnt++;
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("malloc to newstr failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct kvpair * e = searchDictionary(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
insertDictionary(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 2nd argument
inp_r2 = fopen(argv[2], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[2]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("malloc to inp2_list[r2_cnt] failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
printf("Sample size = %d\n", r1_cnt);
//Sort inp2_list based on the number of tokens or length
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
cudaSetDevice(0);
//populate value list for dictionary 1
cudaError_t err = cudaSuccess;
int * r1_dict_value;
//pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
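//Layout of r1_dict_value: one row of (sampleTimes+1) ints per keyword; column 0 holds the observed
//count (filled from the dictionary below) and columns 1..sampleTimes receive the sampled counts
//from the deviceDDI kernel.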
//Device value list for dictionary 1
int *d_r1_dict_value;
Node * r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("malloc to r1_dict_keys failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
destroyDictionary(d_cinp1);
//Strip off the number of tokens from every record in list 2.
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr1);
}
//Process the records for shipping to kernel
char * temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1==NULL){
printf("temp1 malloc failed\n");
exit(0);
}
char * d_r2_str;
j=0;
for(i=0;i<r2_cnt;i++){
for(k=0;k<1000;k++){
if(k<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][k];
}
else
temp1[j++] = '^';
}
free(inp2_list[i]);
}
//char* d_r1_dict_list;
//Allocate global memory for dictionary 1 keywords
/*err = cudaMalloc((void **)&d_r1_dict_list,80*sizeof(char)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_list (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_list,r1_dict_list,80*sizeof(char)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_list (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}*/
//Allocate global memory for input list 2 records
err = cudaMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for dictionary 1 value list
err = cudaMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Node * d_r1_dict_keys = NULL;
err = cudaMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Node **hashtab;
Node **d_hashtab;
err = cudaMalloc((void **)&d_hashtab,sizeof(Node*)*5003);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_hashtab (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
hashtab = (Node**)malloc(sizeof(Node*)*5003);
if(hashtab==NULL){
printf("hashtab malloc failed\n");
exit(0);
}
printf("Sampling for compounds begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread needs to take for sampling
sampleStrides = (r1_cnt + threadCount - 1)/threadCount; //integer ceiling of r1_cnt/threadCount, so the remainder records are also covered
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceDDI<<<(sampleTimes-i)>256?256:(sampleTimes-i), threadCount, (sizeof(int)*(r1_dict_cnt))>>>(d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, d_hashtab, sampleStrides, threadCount,i,(i==0)?false:true);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaDeviceSynchronize();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_keys,d_r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(hashtab,d_hashtab,sizeof(Node*)*5003,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_hashtab to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_hashtab,hashtab,sizeof(Node*)*5003,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_hashtab (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling for compounds completed\n");
err = cudaFree(d_r2_str);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_keys);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
float *d_z_score;
float *z_score_arr;
//Allocate array for Z-score, pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_z_score,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
float *d_p_value;
float *p_value_arr;
//Allocate array for P-value, pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score and P-value calculation for Compounds begin\n");
printf("Kernel deviceZP launched with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0), 256);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceZP<<<ceil(r1_dict_cnt/256.0), 256>>>(d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score, d_p_value);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaDeviceSynchronize();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation completed\n");
err = cudaMemcpy(z_score_arr,d_z_score,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_p_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_z_score);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize the sortedIndex array, as sortedIndex will have the values sorted with quickSort based on descending order of Z-score
//After sorting sortedIndex contains the new index of Z-score.
int * sortedIndex = (int*) malloc(sizeof(int)*r1_dict_cnt);
if(sortedIndex == NULL){
printf("malloc error for sortedIndex\n");
}
for(i=0;i<r1_dict_cnt;i++){
sortedIndex[i] = i;
}
quickSort(z_score_arr, sortedIndex, 0, r1_dict_cnt-1);
printf("Write extracted compounds to output files\n");
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcat(filename1,".txt");
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_compounds_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
//j == sampleTimes here, so the final value printed is the last sampled count of this row
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Compounds output files written\n");
free(r1_dict_keys);
err = cudaFreeHost(p_value_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(z_score_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(sortedIndex);
d_cinp1 = createDictionary();
printf("Processing proteins\n");
printf("Read input files\n");
// Reading the dictionary of proteins of result 1 - 3rd argument
inp_r1 = fopen(argv[3], rmode);
if (inp_r1 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[3]);
exit(1);
}
r1_dict_cnt = 0;
while(1){
fscanf(inp_r1, "%[^\n]%*c", str1);
if( feof(inp_r1)) break;
removeChar(str1,'\r');
len = strlen(str1);
for(i=0;(i<len);i++){
char *newstr = (char*)malloc(len+1);
if(newstr==NULL){
printf("newstr malloc failed\n");
exit(0);
}
j=0;
while(str1[i] != '~'){
newstr[j++] = str1[i++];
}
newstr[j] = '\0';
struct kvpair * e = searchDictionary(d_cinp1,newstr);
if(e!=NULL){
e->value++;
}
else{
insertDictionary(d_cinp1,newstr,1);
r1_dict_cnt++;
}
free(newstr);
}
}
fclose(inp_r1);
// Reading the list of result 2- 4th argument
inp_r2 = fopen(argv[4], rmode);
if (inp_r2 == NULL)
{
fprintf(stderr, "Can't open input file %s!\n", argv[4]);
exit(1);
}
r2_cnt = 0;
while (1)
{
fscanf(inp_r2, "%[^\n]%*c", str1);
if( feof(inp_r2)) break;
removeChar(str1,'\r');
inp2_list[r2_cnt] = (char*)malloc(strlen(str1)+1);
if(inp2_list[r2_cnt]==NULL){
printf("inp2_list[r2_cnt] malloc failed\n");
exit(0);
}
strcpy(inp2_list[r2_cnt++],str1);
}
fclose(inp_r2);
printf("Input files read completed\n");
//Sort inp2_list based on the number of tokens
qsort(inp2_list,r2_cnt,sizeof(char *), sort);
printf("Pre-process records for kernel launch\n");
//pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&r1_dict_value, sizeof(int)*r1_dict_cnt*(sampleTimes+1));
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate r1_dict_value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Process the input data for shipping
free(r1_dict_keys);
r1_dict_keys = (Node*)malloc(sizeof(Node)*r1_dict_cnt);
if(r1_dict_keys==NULL){
printf("r1_dict_keys malloc failed\n");
exit(0);
}
j=0;
k=0;
for(i=0;i<d_cinp1->size;i++)
{
if(d_cinp1->table[i]!=0){
while(1)
{
strcpy(r1_dict_keys[j].key,d_cinp1->table[i]->key);
r1_dict_keys[j].index = j;
r1_dict_keys[j].next = NULL;
r1_dict_value[j*(sampleTimes+1)] = d_cinp1->table[i]->value;
j++;
if(d_cinp1->table[i]->next!= NULL)
d_cinp1->table[i] = d_cinp1->table[i]->next;
else
break;
}
}
}
destroyDictionary(d_cinp1);
//Strip off the number of tokens from list 2 records
for(i=0;i<r2_cnt;i++){
split0 = strtok_r(inp2_list[i], ";", &saveptr2);
}
temp1 = (char*) malloc(1000*r2_cnt*sizeof(char));
if(temp1 == NULL){
printf("temp1 malloc failed\n");
exit(0);
}
j=0;
for(i=0;i<r2_cnt;i++){
for(k=0;k<1000;k++){
if(k<strlen(inp2_list[i])){
temp1[j++] = inp2_list[i][k];
}
else
temp1[j++] = '^';
}
free(inp2_list[i]);
}
err = cudaMalloc((void **)&d_r1_dict_keys,sizeof(Node)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for list 2 records
err = cudaMalloc((void **)&d_r2_str,1000*sizeof(char)*r2_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate global memory for index of dictionary 1 value list
err = cudaMalloc((void **)&d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value,r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Sampling for proteins begin\n");
for(i=0;i<sampleTimes;i=i+256){
threadCount = (r1_cnt>1024)?1024:r1_cnt;
//sampleStrides: maximum number of strides every thread needs to take for sampling
sampleStrides = (r1_cnt + threadCount - 1)/threadCount; //integer ceiling of r1_cnt/threadCount, so the remainder records are also covered
printf("Kernel deviceDDI launched with %d blocks of %d threads each\n", (sampleTimes-i)>256?256:(sampleTimes-i), threadCount);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceDDI<<<(sampleTimes-i)>256?256:(sampleTimes-i), threadCount, (sizeof(int)*(r1_dict_cnt))>>>( d_r2_str, d_r1_dict_value, sampleTimes, r1_cnt, r2_cnt, r1_dict_cnt, d_r1_dict_keys, d_hashtab, sampleStrides, threadCount,i,(i==0)?false:true);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaDeviceSynchronize();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceDDI kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_keys,d_r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_keys,r1_dict_keys,sizeof(Node)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(r1_dict_value,d_r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from device to Host(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r1_dict_value, r1_dict_value,sizeof(int)*(sampleTimes+1)*r1_dict_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r1_dict_value from host to device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(hashtab,d_hashtab,sizeof(Node*)*5003,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_hashtab to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_hashtab,hashtab,sizeof(Node*)*5003,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_hashtab (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_r2_str,temp1,1000*sizeof(char)*r2_cnt,cudaMemcpyHostToDevice);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
printf("Sampling completed\n");
err = cudaFree(d_r1_dict_keys);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_keys (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r2_str);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r2_str (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(temp1);
//Allocate Z-score array pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&z_score_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate z-score host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//float * d_z_score_p;
err = cudaMalloc((void **)&d_z_score,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Allocate P-value array pinned memory for optimized usage of memory transfer bandwidth
err = cudaMallocHost((void**)&p_value_arr, sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate p-value host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc((void **)&d_p_value,sizeof(float)*r1_dict_cnt);
if(err != cudaSuccess){
fprintf(stderr,"Failed to allocate device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins begin\n");
printf("Kernel deviceZP launch with %d blocks of %d threads each\n", (int)ceil(r1_dict_cnt/256.0),256);
totalTime += elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord( start, 0 );
deviceZP<<<ceil(r1_dict_cnt/256.0), 256>>>(d_r1_dict_value, sampleTimes, r1_dict_cnt, d_z_score, d_p_value);
cudaEventRecord( stop, 0 );
cudaEventSynchronize( stop );
cudaEventElapsedTime( &elapsedTime, start, stop );
cudaEventDestroy( start );
cudaEventDestroy( stop );
err = cudaDeviceSynchronize();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaGetLastError();
if(err != cudaSuccess){
fprintf(stderr,"Failed to launch deviceZP kernel device(error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("Z-score, P-value calculation for proteins completed\n");
printf( "\n******** Total Running Time of Kernel = %0.5f seconds ******* \n", (elapsedTime+totalTime)/1000);
printf("Copy output data to host memory\n");
err = cudaMemcpy(p_value_arr,d_p_value,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from p-value device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_p_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_p_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(z_score_arr,d_z_score,sizeof(float)*r1_dict_cnt,cudaMemcpyDeviceToHost);
if(err != cudaSuccess){
fprintf(stderr,"Failed to copy from z-score device to host (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_z_score);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_z_score (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
//Initialize sortedIndex, this will hold correct index of the dictionary 1 records after sorting based on descending order of Z-score
sortedIndex = (int*)malloc(sizeof(int)*r1_dict_cnt);
if(sortedIndex == NULL){
printf("sortedIndex malloc error\n");
}
for(i=0;i<r1_dict_cnt;i++){
sortedIndex[i] = i;
}
//Sort the array based on descending order of Z-score
quickSort(z_score_arr, sortedIndex, 0, r1_dict_cnt-1);
//Write to output files
strcpy(filename1, argv[6]);
strcat(filename1, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename1,cutoffstr);
strcat(filename1,"_p_");
strcat(filename1,pvaluestr);
strcat(filename1,".txt");
strcpy(filename2, argv[6]);
strcat(filename2, "_temp_result1_Substance_proteins_cutoff_");
strcat(filename2,cutoffstr);
strcat(filename2,".txt");
printf("Write extracted proteins to output files\n");
op1 = fopen(filename1, "w");
fprintf(op1,"Term Pair\tDistribution\tZ-Score\tP-value\n");
op2 = fopen(filename2, "w");
fprintf(op2,"Term Pair\tDistribution\tZ-Score\tP-value\n");
k=0;
for(i=0;i<r1_dict_cnt;i++){
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=p_value)){
fprintf(op1,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op1,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
fprintf(op1,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
if((r1_dict_value[(sortedIndex[i]*(sampleTimes+1))]>=cutoff) && (p_value_arr[sortedIndex[i]]<=1.0)){
fprintf(op2,"%s;%s\t[",argv[6],r1_dict_keys[sortedIndex[i]].key);
for(j=0;j<sampleTimes;j++)
fprintf(op2,"%d, ",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j]);
//j == sampleTimes here, so the final value printed is the last sampled count of this row
fprintf(op2,"%d]\t%f\t%f\n",r1_dict_value[(sortedIndex[i]*(sampleTimes+1))+j], z_score_arr[i],p_value_arr[sortedIndex[i]]);
}
k++;
}
fclose(op1);
fclose(op2);
printf("Processing completed\n");
free(r1_dict_keys);
err = cudaFreeHost(p_value_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host p_value_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(z_score_arr);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host z_score_arr (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFreeHost(r1_dict_value);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free pinned host r1_dict_value (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(sortedIndex);
free(hashtab);
err = cudaFree(d_hashtab);
if(err != cudaSuccess){
fprintf(stderr,"Failed to free from device d_hashtab (error code %s) !\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
//End of program |
6330836680236f8ab74f4cfc80877509d053aa7b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cfloat>
#include <hipblas.h> // declares hipblasDgemv and the HIPBLAS_* enums used below
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void tauInitial(double *, double *, fcndata &);
void vertexToEdge(double *, double *, int *, int, int);
void vertexToBottom(double *, double *, int *, int, int);
void computeFrame(double *, double *, double *, fcndata &);
void assembleFemMatrix(double *, double *, double *, double *, double *, fcndata &);
void yankTauActivity(double *, double *, fcndata &);
void reactionTauActivity(double *, double *, fcndata &);
void computeKernel(double *, double *, fcndata &);
void addEpsIdentity(double *, double, int);
void cholesky(double *, fcndata &);
void computeExternalYank(double *, double *, double *, double *, fcndata &);
int ipcg(double *, double *, fcndata &);
void vectoraxpby(double *, double, double *, double, double *, int);
void assembleFemVector(double *, double *, double *, fcndata &);
void deform(double *h_objPtr, double *h_posVec, fcndata &fcnObj)
{
int lmkNum = fcnObj.prm.lmkNum;
int elmNum = fcnObj.prm.elmNum;
int btmElmNum = fcnObj.prm.btmElmNum;
int timeNum = fcnObj.prm.timeNum;
double timeStp = fcnObj.prm.timeStp;
hipMemcpy(fcnObj.d_lmkStk, fcnObj.prm.d_lmkIniMat,
sizeof(double) * lmkNum * DIMNUM, hipMemcpyDeviceToDevice);
hipMemset(fcnObj.d_vlcStk, 0, sizeof(double) * lmkNum * DIMNUM);
hipMemset(fcnObj.d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM);
tauInitial(fcnObj.d_tauMat, h_posVec, fcnObj);
vertexToEdge(fcnObj.d_lmkNowEdgMat, fcnObj.prm.d_lmkIniMat,
fcnObj.elm.d_elmVtxMat, lmkNum, elmNum);
computeFrame(fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj.prm.d_lmkIniMat, fcnObj);
assembleFemMatrix(fcnObj.d_ppdNowMat, fcnObj.d_ggdNowMat,
fcnObj.d_lmkNowEdgMat, fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj);
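// Time-stepping loop: each step computes the external yank, solves for the landmark velocity
// with ipcg, advances the landmark positions by an explicit Euler step of size timeStp, and
// then updates tau implicitly through a Cholesky factorization/solve (potrf/potrs).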
for ( int timeIdx = 0; timeIdx < timeNum - 1; ++timeIdx )
{
fcnObj.d_lmkNowMat = fcnObj.d_lmkStk + timeIdx * lmkNum * DIMNUM;
fcnObj.d_lmkNxtMat = fcnObj.d_lmkStk + (timeIdx + 1) * lmkNum * DIMNUM;
fcnObj.d_tauNowVec = fcnObj.d_tauMat + timeIdx * lmkNum;
fcnObj.d_tauNxtVec = fcnObj.d_tauMat + (timeIdx + 1) * lmkNum;
fcnObj.d_vlcMat = fcnObj.d_vlcStk + timeIdx * lmkNum * DIMNUM;
vertexToBottom(fcnObj.d_lmkNowBtmMat, fcnObj.d_lmkNowMat,
fcnObj.elm.d_btmVtxMat, lmkNum, btmElmNum);
yankTauActivity(fcnObj.d_ynkActFcnNowVec, fcnObj.d_tauNowVec, fcnObj);
reactionTauActivity(fcnObj.d_reaActFcnNowVec, fcnObj.d_tauNowVec, fcnObj);
computeKernel(fcnObj.d_knlMat, fcnObj.d_lmkNowMat, fcnObj);
addEpsIdentity(fcnObj.d_knlMat, fcnObj.prm.knlEps, lmkNum);
hipMemcpy(fcnObj.d_knLMat, fcnObj.d_knlMat,
sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice);
cholesky(fcnObj.d_knLMat, fcnObj);
computeExternalYank(fcnObj.d_exYMat, fcnObj.d_lmkNowMat, fcnObj.d_lmkNowEdgMat,
fcnObj.d_ynkActFcnNowVec, fcnObj);
int cgStatus = ipcg(fcnObj.d_vlcMat, fcnObj.d_exYMat, fcnObj);
if ( cgStatus != 0 )
{
*h_objPtr = DBL_MAX;
return;
}
if ( timeIdx < timeNum - 2 )
{
hipMemcpy(fcnObj.d_vlcMat + lmkNum * DIMNUM, fcnObj.d_vlcMat,
sizeof(double) * lmkNum * DIMNUM, hipMemcpyDeviceToDevice);
}
vectoraxpby(fcnObj.d_lmkNxtMat,
1.0, fcnObj.d_lmkNowMat, timeStp, fcnObj.d_vlcMat, lmkNum * DIMNUM);
vertexToEdge(fcnObj.d_lmkNxtEdgMat, fcnObj.d_lmkNxtMat,
fcnObj.elm.d_elmVtxMat, lmkNum, elmNum);
computeFrame(fcnObj.d_nmlNxtMat, fcnObj.d_tsvNxtMat, fcnObj.d_lmkNxtMat, fcnObj);
assembleFemMatrix(fcnObj.d_ppdNxtMat, fcnObj.d_ggdNxtMat,
fcnObj.d_lmkNxtEdgMat, fcnObj.d_nmlNxtMat, fcnObj.d_tsvNxtMat, fcnObj);
assembleFemVector(fcnObj.d_femRpdVec, fcnObj.d_lmkNowEdgMat, fcnObj.d_reaActFcnNowVec, fcnObj);
vectoraxpby(fcnObj.d_femLftMat, 1.0, fcnObj.d_ppdNxtMat, timeStp, fcnObj.d_ggdNxtMat, lmkNum * lmkNum);
hipMemcpy(fcnObj.d_femPpdMat, fcnObj.d_ppdNowMat, sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice);
double oneVal = 1.0;
hipblasDgemv(fcnObj.blasHdl, HIPBLAS_OP_N, lmkNum, lmkNum,
&oneVal, fcnObj.d_femPpdMat, lmkNum, fcnObj.d_tauNowVec, 1,
&timeStp, fcnObj.d_femRpdVec, 1);
hipsolverDnDpotrf(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER,
lmkNum, fcnObj.d_femLftMat, lmkNum,
fcnObj.d_workspace, fcnObj.h_Lwork, fcnObj.d_status);
hipMemcpy(fcnObj.d_tauNxtVec, fcnObj.d_femRpdVec, sizeof(double) * lmkNum, hipMemcpyDeviceToDevice);
hipsolverDnDpotrs(fcnObj.solvHdl, HIPBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_femLftMat, lmkNum,
fcnObj.d_tauNxtVec, lmkNum, fcnObj.d_status);
hipMemcpy(fcnObj.d_lmkNowEdgMat, fcnObj.d_lmkNxtEdgMat,
sizeof(double) * elmNum * DIMNUM * (VTXNUM - 1), hipMemcpyDeviceToDevice);
hipMemcpy(fcnObj.d_nmlNowMat, fcnObj.d_nmlNxtMat,
sizeof(double) * elmNum * DIMNUM, hipMemcpyDeviceToDevice);
hipMemcpy(fcnObj.d_tsvNowMat, fcnObj.d_tsvNxtMat,
sizeof(double) * elmNum * DIMNUM, hipMemcpyDeviceToDevice);
hipMemcpy(fcnObj.d_ppdNowMat, fcnObj.d_ppdNxtMat,
sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice);
hipMemcpy(fcnObj.d_ggdNowMat, fcnObj.d_ggdNxtMat,
sizeof(double) * lmkNum * lmkNum, hipMemcpyDeviceToDevice);
}
return;
}
| 6330836680236f8ab74f4cfc80877509d053aa7b.cu | #include <cfloat>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "struct.h"
#include "constants.h"
void tauInitial(double *, double *, fcndata &);
void vertexToEdge(double *, double *, int *, int, int);
void vertexToBottom(double *, double *, int *, int, int);
void computeFrame(double *, double *, double *, fcndata &);
void assembleFemMatrix(double *, double *, double *, double *, double *, fcndata &);
void yankTauActivity(double *, double *, fcndata &);
void reactionTauActivity(double *, double *, fcndata &);
void computeKernel(double *, double *, fcndata &);
void addEpsIdentity(double *, double, int);
void cholesky(double *, fcndata &);
void computeExternalYank(double *, double *, double *, double *, fcndata &);
int ipcg(double *, double *, fcndata &);
void vectoraxpby(double *, double, double *, double, double *, int);
void assembleFemVector(double *, double *, double *, fcndata &);
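// Note (not in the original source): cusolverDnDpotrf reports failure through the devInfo
// integer it writes on the device; here that is fcnObj.d_status, which is never checked
// afterwards. A minimal sketch of such a check, with a hypothetical helper name and the
// assumption that d_status points to a single device-resident int:
static int potrfSucceeded(int *d_status)
{
int h_info = 0;
cudaMemcpy(&h_info, d_status, sizeof(int), cudaMemcpyDeviceToHost);
return h_info == 0; // nonzero devInfo means the Cholesky factorization failed
}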
void deform(double *h_objPtr, double *h_posVec, fcndata &fcnObj)
{
int lmkNum = fcnObj.prm.lmkNum;
int elmNum = fcnObj.prm.elmNum;
int btmElmNum = fcnObj.prm.btmElmNum;
int timeNum = fcnObj.prm.timeNum;
double timeStp = fcnObj.prm.timeStp;
cudaMemcpy(fcnObj.d_lmkStk, fcnObj.prm.d_lmkIniMat,
sizeof(double) * lmkNum * DIMNUM, cudaMemcpyDeviceToDevice);
cudaMemset(fcnObj.d_vlcStk, 0, sizeof(double) * lmkNum * DIMNUM);
cudaMemset(fcnObj.d_pnlMat, 0, sizeof(double) * lmkNum * DIMNUM);
tauInitial(fcnObj.d_tauMat, h_posVec, fcnObj);
vertexToEdge(fcnObj.d_lmkNowEdgMat, fcnObj.prm.d_lmkIniMat,
fcnObj.elm.d_elmVtxMat, lmkNum, elmNum);
computeFrame(fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj.prm.d_lmkIniMat, fcnObj);
assembleFemMatrix(fcnObj.d_ppdNowMat, fcnObj.d_ggdNowMat,
fcnObj.d_lmkNowEdgMat, fcnObj.d_nmlNowMat, fcnObj.d_tsvNowMat, fcnObj);
for ( int timeIdx = 0; timeIdx < timeNum - 1; ++timeIdx )
{
fcnObj.d_lmkNowMat = fcnObj.d_lmkStk + timeIdx * lmkNum * DIMNUM;
fcnObj.d_lmkNxtMat = fcnObj.d_lmkStk + (timeIdx + 1) * lmkNum * DIMNUM;
fcnObj.d_tauNowVec = fcnObj.d_tauMat + timeIdx * lmkNum;
fcnObj.d_tauNxtVec = fcnObj.d_tauMat + (timeIdx + 1) * lmkNum;
fcnObj.d_vlcMat = fcnObj.d_vlcStk + timeIdx * lmkNum * DIMNUM;
vertexToBottom(fcnObj.d_lmkNowBtmMat, fcnObj.d_lmkNowMat,
fcnObj.elm.d_btmVtxMat, lmkNum, btmElmNum);
yankTauActivity(fcnObj.d_ynkActFcnNowVec, fcnObj.d_tauNowVec, fcnObj);
reactionTauActivity(fcnObj.d_reaActFcnNowVec, fcnObj.d_tauNowVec, fcnObj);
computeKernel(fcnObj.d_knlMat, fcnObj.d_lmkNowMat, fcnObj);
addEpsIdentity(fcnObj.d_knlMat, fcnObj.prm.knlEps, lmkNum);
cudaMemcpy(fcnObj.d_knLMat, fcnObj.d_knlMat,
sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice);
cholesky(fcnObj.d_knLMat, fcnObj);
computeExternalYank(fcnObj.d_exYMat, fcnObj.d_lmkNowMat, fcnObj.d_lmkNowEdgMat,
fcnObj.d_ynkActFcnNowVec, fcnObj);
int cgStatus = ipcg(fcnObj.d_vlcMat, fcnObj.d_exYMat, fcnObj);
if ( cgStatus != 0 )
{
*h_objPtr = DBL_MAX;
return;
}
if ( timeIdx < timeNum - 2 )
{
cudaMemcpy(fcnObj.d_vlcMat + lmkNum * DIMNUM, fcnObj.d_vlcMat,
sizeof(double) * lmkNum * DIMNUM, cudaMemcpyDeviceToDevice);
}
vectoraxpby(fcnObj.d_lmkNxtMat,
1.0, fcnObj.d_lmkNowMat, timeStp, fcnObj.d_vlcMat, lmkNum * DIMNUM);
vertexToEdge(fcnObj.d_lmkNxtEdgMat, fcnObj.d_lmkNxtMat,
fcnObj.elm.d_elmVtxMat, lmkNum, elmNum);
computeFrame(fcnObj.d_nmlNxtMat, fcnObj.d_tsvNxtMat, fcnObj.d_lmkNxtMat, fcnObj);
assembleFemMatrix(fcnObj.d_ppdNxtMat, fcnObj.d_ggdNxtMat,
fcnObj.d_lmkNxtEdgMat, fcnObj.d_nmlNxtMat, fcnObj.d_tsvNxtMat, fcnObj);
assembleFemVector(fcnObj.d_femRpdVec, fcnObj.d_lmkNowEdgMat, fcnObj.d_reaActFcnNowVec, fcnObj);
vectoraxpby(fcnObj.d_femLftMat, 1.0, fcnObj.d_ppdNxtMat, timeStp, fcnObj.d_ggdNxtMat, lmkNum * lmkNum);
cudaMemcpy(fcnObj.d_femPpdMat, fcnObj.d_ppdNowMat, sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice);
double oneVal = 1.0;
cublasDgemv(fcnObj.blasHdl, CUBLAS_OP_N, lmkNum, lmkNum,
&oneVal, fcnObj.d_femPpdMat, lmkNum, fcnObj.d_tauNowVec, 1,
&timeStp, fcnObj.d_femRpdVec, 1);
cusolverDnDpotrf(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER,
lmkNum, fcnObj.d_femLftMat, lmkNum,
fcnObj.d_workspace, fcnObj.h_Lwork, fcnObj.d_status);
cudaMemcpy(fcnObj.d_tauNxtVec, fcnObj.d_femRpdVec, sizeof(double) * lmkNum, cudaMemcpyDeviceToDevice);
cusolverDnDpotrs(fcnObj.solvHdl, CUBLAS_FILL_MODE_LOWER, lmkNum, DIMNUM, fcnObj.d_femLftMat, lmkNum,
fcnObj.d_tauNxtVec, lmkNum, fcnObj.d_status);
cudaMemcpy(fcnObj.d_lmkNowEdgMat, fcnObj.d_lmkNxtEdgMat,
sizeof(double) * elmNum * DIMNUM * (VTXNUM - 1), cudaMemcpyDeviceToDevice);
cudaMemcpy(fcnObj.d_nmlNowMat, fcnObj.d_nmlNxtMat,
sizeof(double) * elmNum * DIMNUM, cudaMemcpyDeviceToDevice);
cudaMemcpy(fcnObj.d_tsvNowMat, fcnObj.d_tsvNxtMat,
sizeof(double) * elmNum * DIMNUM, cudaMemcpyDeviceToDevice);
cudaMemcpy(fcnObj.d_ppdNowMat, fcnObj.d_ppdNxtMat,
sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice);
cudaMemcpy(fcnObj.d_ggdNowMat, fcnObj.d_ggdNxtMat,
sizeof(double) * lmkNum * lmkNum, cudaMemcpyDeviceToDevice);
}
return;
}
|
eb2e9610bb721269043668ca2bb1346b28deabe2.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexFlat.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <gtest/gtest.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <sstream>
#include <vector>
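// Runs brute-force k-NN through bfKnn for one metric and one memory layout combination
// (row- or column-major vectors/queries) and compares the resulting distances and indices
// against a CPU faiss::IndexFlat reference built on the same random data.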
void testTransposition(
bool colMajorVecs,
bool colMajorQueries,
faiss::MetricType metric,
float metricArg = 0) {
using namespace faiss::gpu;
int device = randVal(0, getNumDevices() - 1);
StandardGpuResources res;
res.noTempMemory();
int dim = randVal(20, 150);
int numVecs = randVal(10, 30000);
int numQuery = randVal(1, 1024);
int k = std::min(numVecs, randVal(20, 70));
// Input data for CPU
std::vector<float> vecs = randVecs(numVecs, dim);
std::vector<float> queries = randVecs(numQuery, dim);
if (metric == faiss::MetricType::METRIC_JensenShannon) {
// make values positive
for (auto& v : vecs) {
v = std::abs(v);
if (v == 0) {
v = 1e-6;
}
}
for (auto& q : queries) {
q = std::abs(q);
if (q == 0) {
q = 1e-6;
}
}
}
// The CPU index is our reference for the results
faiss::IndexFlat cpuIndex(dim, metric);
cpuIndex.metric_arg = metricArg;
cpuIndex.add(numVecs, vecs.data());
std::vector<float> cpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1);
cpuIndex.search(
numQuery, queries.data(), k, cpuDistance.data(), cpuIndices.data());
// The transpose and distance code assumes the desired device is already set
DeviceScope scope(device);
auto stream = res.getDefaultStream(device);
// Copy input data to GPU, and pre-transpose both vectors and queries for
// passing
auto gpuVecs = toDeviceNonTemporary<float, 2>(
res.getResources().get(),
device,
vecs.data(),
stream,
{numVecs, dim});
auto gpuQueries = toDeviceNonTemporary<float, 2>(
res.getResources().get(),
device,
queries.data(),
stream,
{numQuery, dim});
DeviceTensor<float, 2, true> vecsT(
res.getResources().get(),
makeDevAlloc(AllocType::Other, stream),
{dim, numVecs});
runTransposeAny(gpuVecs, 0, 1, vecsT, stream);
DeviceTensor<float, 2, true> queriesT(
res.getResources().get(),
makeDevAlloc(AllocType::Other, stream),
{dim, numQuery});
runTransposeAny(gpuQueries, 0, 1, queriesT, stream);
std::vector<float> gpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1);
GpuDistanceParams args;
args.metric = metric;
args.metricArg = metricArg;
args.k = k;
args.dims = dim;
args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data();
args.vectorsRowMajor = !colMajorVecs;
args.numVectors = numVecs;
args.queries = colMajorQueries ? queriesT.data() : gpuQueries.data();
args.queriesRowMajor = !colMajorQueries;
args.numQueries = numQuery;
args.outDistances = gpuDistance.data();
args.outIndices = gpuIndices.data();
bfKnn(&res, args);
std::stringstream str;
str << "metric " << metric << " colMajorVecs " << colMajorVecs
<< " colMajorQueries " << colMajorQueries;
compareLists(
cpuDistance.data(),
cpuIndices.data(),
gpuDistance.data(),
gpuIndices.data(),
numQuery,
k,
str.str(),
false,
false,
true,
6e-3f,
0.1f,
0.015f);
}
// Test different memory layouts for brute-force k-NN
TEST(TestGpuDistance, Transposition_RR) {
testTransposition(false, false, faiss::MetricType::METRIC_L2);
testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT);
}
TEST(TestGpuDistance, Transposition_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, L1) {
testTransposition(false, false, faiss::MetricType::METRIC_L1);
}
// Test other transpositions with the general distance kernel
TEST(TestGpuDistance, L1_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L1);
}
// Test remainder of metric types
TEST(TestGpuDistance, Linf) {
testTransposition(false, false, faiss::MetricType::METRIC_Linf);
}
TEST(TestGpuDistance, Lp) {
testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3);
}
TEST(TestGpuDistance, Canberra) {
testTransposition(false, false, faiss::MetricType::METRIC_Canberra);
}
TEST(TestGpuDistance, BrayCurtis) {
testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis);
}
TEST(TestGpuDistance, JensenShannon) {
testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
// just run with a fixed test seed
faiss::gpu::setTestSeed(100);
return RUN_ALL_TESTS();
}
| eb2e9610bb721269043668ca2bb1346b28deabe2.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/IndexFlat.h>
#include <faiss/gpu/GpuDistance.h>
#include <faiss/gpu/StandardGpuResources.h>
#include <faiss/gpu/test/TestUtils.h>
#include <faiss/gpu/utils/DeviceUtils.h>
#include <gtest/gtest.h>
#include <faiss/gpu/utils/CopyUtils.cuh>
#include <faiss/gpu/utils/Transpose.cuh>
#include <sstream>
#include <vector>
void testTransposition(
bool colMajorVecs,
bool colMajorQueries,
faiss::MetricType metric,
float metricArg = 0) {
using namespace faiss::gpu;
int device = randVal(0, getNumDevices() - 1);
StandardGpuResources res;
res.noTempMemory();
int dim = randVal(20, 150);
int numVecs = randVal(10, 30000);
int numQuery = randVal(1, 1024);
int k = std::min(numVecs, randVal(20, 70));
// Input data for CPU
std::vector<float> vecs = randVecs(numVecs, dim);
std::vector<float> queries = randVecs(numQuery, dim);
if (metric == faiss::MetricType::METRIC_JensenShannon) {
// make values positive
for (auto& v : vecs) {
v = std::abs(v);
if (v == 0) {
v = 1e-6;
}
}
for (auto& q : queries) {
q = std::abs(q);
if (q == 0) {
q = 1e-6;
}
}
}
// The CPU index is our reference for the results
faiss::IndexFlat cpuIndex(dim, metric);
cpuIndex.metric_arg = metricArg;
cpuIndex.add(numVecs, vecs.data());
std::vector<float> cpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> cpuIndices(numQuery * k, -1);
cpuIndex.search(
numQuery, queries.data(), k, cpuDistance.data(), cpuIndices.data());
// The transpose and distance code assumes the desired device is already set
DeviceScope scope(device);
auto stream = res.getDefaultStream(device);
// Copy input data to GPU, and pre-transpose both vectors and queries for
// passing
auto gpuVecs = toDeviceNonTemporary<float, 2>(
res.getResources().get(),
device,
vecs.data(),
stream,
{numVecs, dim});
auto gpuQueries = toDeviceNonTemporary<float, 2>(
res.getResources().get(),
device,
queries.data(),
stream,
{numQuery, dim});
DeviceTensor<float, 2, true> vecsT(
res.getResources().get(),
makeDevAlloc(AllocType::Other, stream),
{dim, numVecs});
runTransposeAny(gpuVecs, 0, 1, vecsT, stream);
DeviceTensor<float, 2, true> queriesT(
res.getResources().get(),
makeDevAlloc(AllocType::Other, stream),
{dim, numQuery});
runTransposeAny(gpuQueries, 0, 1, queriesT, stream);
std::vector<float> gpuDistance(numQuery * k, 0);
std::vector<faiss::Index::idx_t> gpuIndices(numQuery * k, -1);
GpuDistanceParams args;
args.metric = metric;
args.metricArg = metricArg;
args.k = k;
args.dims = dim;
args.vectors = colMajorVecs ? vecsT.data() : gpuVecs.data();
args.vectorsRowMajor = !colMajorVecs;
args.numVectors = numVecs;
args.queries = colMajorQueries ? queriesT.data() : gpuQueries.data();
args.queriesRowMajor = !colMajorQueries;
args.numQueries = numQuery;
args.outDistances = gpuDistance.data();
args.outIndices = gpuIndices.data();
bfKnn(&res, args);
std::stringstream str;
str << "metric " << metric << " colMajorVecs " << colMajorVecs
<< " colMajorQueries " << colMajorQueries;
compareLists(
cpuDistance.data(),
cpuIndices.data(),
gpuDistance.data(),
gpuIndices.data(),
numQuery,
k,
str.str(),
false,
false,
true,
6e-3f,
0.1f,
0.015f);
}
// Test different memory layouts for brute-force k-NN
TEST(TestGpuDistance, Transposition_RR) {
testTransposition(false, false, faiss::MetricType::METRIC_L2);
testTransposition(false, false, faiss::MetricType::METRIC_INNER_PRODUCT);
}
TEST(TestGpuDistance, Transposition_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, Transposition_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L2);
}
TEST(TestGpuDistance, L1) {
testTransposition(false, false, faiss::MetricType::METRIC_L1);
}
// Test other transpositions with the general distance kernel
TEST(TestGpuDistance, L1_RC) {
testTransposition(false, true, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CR) {
testTransposition(true, false, faiss::MetricType::METRIC_L1);
}
TEST(TestGpuDistance, L1_CC) {
testTransposition(true, true, faiss::MetricType::METRIC_L1);
}
// Test remainder of metric types
TEST(TestGpuDistance, Linf) {
testTransposition(false, false, faiss::MetricType::METRIC_Linf);
}
TEST(TestGpuDistance, Lp) {
testTransposition(false, false, faiss::MetricType::METRIC_Lp, 3);
}
TEST(TestGpuDistance, Canberra) {
testTransposition(false, false, faiss::MetricType::METRIC_Canberra);
}
TEST(TestGpuDistance, BrayCurtis) {
testTransposition(false, false, faiss::MetricType::METRIC_BrayCurtis);
}
TEST(TestGpuDistance, JensenShannon) {
testTransposition(false, false, faiss::MetricType::METRIC_JensenShannon);
}
int main(int argc, char** argv) {
testing::InitGoogleTest(&argc, argv);
// just run with a fixed test seed
faiss::gpu::setTestSeed(100);
return RUN_ALL_TESTS();
}
|
6dd638d8d22d18475f0c8e487ccc4dca17d4f596.hip | // !!! This is a file automatically generated by hipify!!!
/*
* This example is taken from the NVIDIA documentation (Copyright 1993-2013
* NVIDIA Corporation) and has been adapted to show the use of CUPTI in
* collecting event counters for multiple GPU contexts.
*
* 'likeComp' does the job the component does: breaking the metric events
* out into a list and then building a group from that list, and trying to
* read it.
*/
/*
* This software contains source code provided by NVIDIA Corporation
*
* According to the Nvidia EULA (compute 5.5 version)
* http://developer.download.nvidia.com/compute/cuda/5_5/rel/docs/EULA.pdf
*
* Chapter 2. NVIDIA CORPORATION CUDA SAMPLES END USER LICENSE AGREEMENT
* 2.1.1. Source Code
* Developer shall have the right to modify and create derivative works with the Source
* Code. Developer shall own any derivative works ("Derivatives") it creates to the Source
* Code, provided that Developer uses the Materials in accordance with the terms and
* conditions of this Agreement. Developer may distribute the Derivatives, provided that
* all NVIDIA copyright notices and trademarks are propagated and used properly and
* the Derivatives include the following statement: This software contains source code
* provided by NVIDIA Corporation.
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi.h"
#include "papi_test.h"
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
char *NameToCollect = NULL;
#define CHECK_CU_ERROR(err, cufunc) \
if (err != hipSuccess) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != hipSuccess) { printf ("%s:%i Error %d for CUDA [%s]\n", __FILE__, __LINE__, err, hipGetErrorString(err) ); return -1; }
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define CHECK_ALLOC_ERROR(var) \
do { \
if (var == NULL) { \
fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
static void printUsage() {
printf("usage: Perform a CUPTI only test of an event or metric.\n");
printf(" -help : display help message\n");
printf(" EVENT_NAME : or Metric, must be the LAST argument, after any flags.\n");
printf("Note the PAPI prefix of 'cuda:::event:' or 'cuda:::metric:' should be left off,\n");
printf("also any ':device=n' suffix. Those are PAPI added elements for disambiguation. \n");
}
void parseCommandLineArgs(int argc, char *argv[])
{
if (argc < 2) {
printf("Invalid number of options\n");
printUsage();
exit(0);
}
NameToCollect = argv[1]; // Record name to collect.
} // end routine.
//-----------------------------------------------------------------------------
// Return a text version with B, KB, MB, GB or TB.
//-----------------------------------------------------------------------------
#define DIM(x) (sizeof(x)/sizeof(*(x)))
void calculateSize(char *result, uint64_t size)
{
int i;
const char *sizes[] = { "TB", "GB", "MB", "KB", "B" };
uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL;
uint64_t multiplier = exbibytes;
for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) {
if(size < multiplier)
continue;
sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]);
return;
}
strcpy(result, "0");
return;
} // end routine
//-------------------------------------------------------------------------------------------------
// Returns the values in the event groups. Caller must know the number of events, and eventValues
// must be large enough to hold that many. eventIDArray must be large enough to hold that many
// event IDs.
//-------------------------------------------------------------------------------------------------
void readEventGroup(CUpti_EventGroup eventGroup,
hipDevice_t dev,
uint32_t numEvents,
CUpti_EventID *eventIdArray,
uint64_t *eventValues) {
size_t bufferSizeBytes, numCountersRead;
size_t eventIdArrayBytes= sizeof(CUpti_EventID) * numEvents;
size_t numTotalInstancesSize = 0;
uint64_t numTotalInstances = 0;
uint32_t i = 0, j = 0;
CUpti_EventDomainID domainId;
size_t domainSize;
domainSize = sizeof(CUpti_EventDomainID);
CUPTI_CALL(cuptiEventGroupGetAttribute(eventGroup,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&domainSize,
(void *)&domainId));
numTotalInstancesSize = sizeof(uint64_t);
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(dev,
domainId,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize,
(void *)&numTotalInstances));
printf("LINE %i, DeviceEventDomainAttribute numTotalInstances=%llu.\n", __LINE__, numTotalInstances);
bufferSizeBytes = sizeof(uint64_t) * numEvents * numTotalInstances;
uint64_t *eventValueArray = (uint64_t *) malloc(bufferSizeBytes);
CHECK_ALLOC_ERROR(eventValueArray);
for (i=0; i<numEvents; i++) eventValues[i]=0; // init the values.
CUPTI_CALL(cuptiEventGroupReadAllEvents(eventGroup,
CUPTI_EVENT_READ_FLAG_NONE,
&bufferSizeBytes,
eventValueArray,
&eventIdArrayBytes,
eventIdArray,
&numCountersRead));
printf("LINE %i, numCountersRead=%u.\n", __LINE__, numCountersRead);
if (numCountersRead != numEvents) {
if (numCountersRead > numEvents) exit(-1);
}
// Arrangement of 2-d Array returned in eventValueArray:
// domain instance 0: event0 event1 ... eventN
// domain instance 1: event0 event1 ... eventN
// ...
// domain instance M: event0 event1 ... eventN
// But we accumulate by column, event[0], event[1], etc.
for (i = 0; i < numEvents; i++) { // outer loop column traversal.
for (j = 0; j < numTotalInstances; j++) { // inner loop row traversal.
eventValues[i] += eventValueArray[i + numEvents * j];
}
}
free(eventValueArray); // Done with this.
} // end routine.
//-------------------------------------------------------------------------------------------------
// For reading a metric. This still requires a group of events.
// This cannot read a metric that requires more than one group; if you need that, we need to pass
// a set instead, and loop through the groups in the set, and accumulate a table of the collected
// events. TC
//-------------------------------------------------------------------------------------------------
void readMetricValue(CUpti_EventGroup eventGroup, uint32_t numEvents,
hipDevice_t dev, CUpti_MetricID *metricId,
uint64_t ns_timeDuration,
CUpti_MetricValue *metricValue) {
int i;
uint64_t *eventValues = NULL;
CUpti_EventID *eventIDs;
size_t eventValuesSize = sizeof(uint64_t) * numEvents;
size_t eventIDsSize = sizeof(CUpti_EventID) * numEvents;
eventValues = (uint64_t *) malloc(eventValuesSize);
CHECK_ALLOC_ERROR(eventValues);
eventIDs = (CUpti_EventID *) malloc(eventIDsSize);
CHECK_ALLOC_ERROR(eventIDs);
readEventGroup(eventGroup, dev, numEvents, eventIDs, eventValues); // Read the event group.
for (i=0; i<numEvents; i++) {
printf(" readMetricValue: EventID %lu=read %lu.\n", eventIDs[i], eventValues[i]);
}
CUPTI_CALL(cuptiMetricGetValue(dev, metricId[0],
eventIDsSize, eventIDs,
eventValuesSize, eventValues,
ns_timeDuration, metricValue));
free(eventValues);
free(eventIDs);
} // end routine.
// Print metric value, we format based on the value kind
int printMetricValue(CUpti_MetricID metricId, CUpti_MetricValue metricValue,
const char *metricName) {
CUpti_MetricValueKind valueKind;
char str[64];
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("%s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueUint64);
printf("%s\n", str);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueInt64);
printf("%s\n", str);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("%s = %.2f%%\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueThroughput);
printf("%s\n", str);
break;
default:
fflush(stdout);
fprintf(stderr, "error: unknown value kind = %li\n", valueKind);
return -1; // indicate failure.
}
return 0; // indicate success.
} // end routine.
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
hipCtx_t ctx[MAX_GPU_COUNT];
printf( "Starting cudaTest_cupti_only.\n" );
// Parse command line arguments
parseCommandLineArgs(argc, argv);
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
char deviceName[64];
hipDevice_t device[MAX_GPU_COUNT];
CHECK_CUDA_ERROR( hipGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( i=0; i<GPU_N; i++ ) {
CHECK_CU_ERROR( hipDeviceGet( &device[i], i ), "hipDeviceGet" );
CHECK_CU_ERROR( hipDeviceGetName( deviceName, 64, device[i] ), "hipDeviceGetName" );
CHECK_CU_ERROR( hipDeviceGetAttribute( &computeCapabilityMajor,
hipDeviceAttributeComputeCapabilityMajor, device[i]), "hipDeviceGetAttribute");
CHECK_CU_ERROR( hipDeviceGetAttribute( &computeCapabilityMinor,
hipDeviceAttributeComputeCapabilityMinor, device[i]), "hipDeviceGetAttribute");
hipRuntimeGetVersion( &runtimeVersion );
hipDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n",
i, deviceName, computeCapabilityMajor, computeCapabilityMinor,
runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", i );
GPU_N = i;
break;
}
} // end for each device.
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n",
CUPTI_API_VERSION, cupti_linked_version );
// create one context per device
for (i = 0; i < GPU_N; i++) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR( hipCtxCreate( &(ctx[i]), 0, device[i] ), "hipCtxCreate" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
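// Resolve NameToCollect: first try it as a CUPTI event name, then as a metric name.
// Metrics are broken out into their component events further below before collection.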
printf("Searching for '%s'.\n", NameToCollect);
CUptiResult myCURes;
CUpti_EventID eventId;
CUpti_MetricID metricId;
CUpti_MetricValueKind metricKind;
size_t metricKindSize = sizeof(CUpti_MetricValueKind);
uint32_t numSubs; // Number of sub-events in Metric.
int isMetric = 0; // Presume this is not a metric.
int numEventGroups = 0;
int numMetricEvents[MAX_GPU_COUNT]={0};
size_t sizeInt = sizeof(int);
myCURes = cuptiEventGetIdFromName(0, NameToCollect, &eventId);
if (myCURes == CUPTI_SUCCESS) {
printf("Found '%s' as an event.\n", NameToCollect);
} else {
myCURes = cuptiMetricGetIdFromName(0, NameToCollect, &metricId);
if (myCURes == CUPTI_SUCCESS) {
isMetric = 1; // remember we found a metric.
printf("Found '%s' as a metric.\n", NameToCollect);
} else {
printf("'%s' not found, as event or as metric.\n", NameToCollect);
exit(-1);
}
}
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( hipStreamCreate( &plan[i].stream ) );
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( hipHostMalloc( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Create the group(s) needed to read the metric or event.
CUpti_EventGroup eg[MAX_GPU_COUNT]; // event group only.
CUpti_EventGroupSets* egs[MAX_GPU_COUNT]; // need event group sets for metric.
if (isMetric) { // If it is a metric, need a set.
printf("Setup CUPTI counters internally for metric '%s'.\n", NameToCollect);
for ( i=0; i<GPU_N; i++ ) { // For every device,
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CUPTI_CALL(cuptiSetEventCollectionMode(ctx[i],
CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS)); // note: CONTINOUS v. KERNEL made no difference in result.
// Here is where the change occurs. We have metricId.
// First, get number of events.
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &numSubs)); // Get number of events needed for metric.
size_t sizeBytes = numSubs * sizeof(CUpti_EventID); // bytes needed to store events.
CUpti_EventID *subEventIds = (CUpti_EventID*) malloc(sizeBytes); // Get the space for them.
CUPTI_CALL(cuptiMetricEnumEvents(metricId, &sizeBytes, subEventIds)); // Collect the events.
for (j=0; j<numSubs; j++) printf("Metric subEvent %i: %lu\n", j, subEventIds[j]);
CUPTI_CALL(cuptiMetricGetAttribute( // Get the kind.
metricId,
CUPTI_METRIC_ATTR_VALUE_KIND,
&metricKindSize, &metricKind));
printf("Metric value kind = %i.\n", metricKind);
CUPTI_CALL(cuptiEventGroupSetsCreate( // create event group sets.
ctx[i],
sizeBytes, subEventIds,
&egs[i]));
// The proper way to do it.
// CUPTI_CALL(cuptiMetricCreateEventGroupSets(ctx[i],
// sizeof(CUpti_MetricID), &metricId, &egs[i])); // Get the pointer to sets.
printf("Metric device %i requires %i sets.\n", i, egs[i]->numSets);
if (egs[i]->numSets > 1) {
printf("'%s' requires multiple application runs to complete. Aborting.\n", NameToCollect);
exit(-1);
}
numEventGroups = egs[i]->sets[0].numEventGroups; // collect groups in only set.
if (numEventGroups > 1) {
printf("'%s' requires multiple groups to complete metric. Aborting.\n", NameToCollect);
exit(-1);
}
// DEBUG note: This has to change to support metrics with multiple
// groups, if we ever see them. can't use eg[i], for example,
// you'd need a different one on each GPU. Tony C.
for (j=0; j<numEventGroups; j++) {
uint32_t one = 1;
eg[i] = egs[i]->sets[0].eventGroups[j]; // Copy the group.
CUPTI_CALL(cuptiEventGroupSetAttribute(eg[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(uint32_t), &one));
CUPTI_CALL(cuptiEventGroupGetAttribute(
eg[i], CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&sizeInt, &numMetricEvents[i])); // read # of events on this device.
printf("Group %i has %i events.\n", j+1, numMetricEvents[i]);
size_t subSize = numMetricEvents[i] * sizeof(CUpti_EventID); // size in bytes.
CUpti_EventID *subEvents = (CUpti_EventID*) malloc(subSize);
CUPTI_CALL( cuptiMetricEnumEvents(metricId, &subSize, subEvents));
int k;
for (k=0; k<numMetricEvents[i]; k++) {
printf(" Group %i event %i ID=%lu\n", j+1, k, subEvents[k]);
}
free(subEvents); // free memory used for diagnostic.
}
CUPTI_CALL(cuptiEventGroupSetEnable(&egs[i]->sets[0])); // Enable all groups in set.
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])),
"cuCtxPopCurrent" );
} // end of devices.
} else { // If it is an event, just need one group.
printf("Setup CUPTI counters internally for event '%s' (CUPTI_ONLY)\n", NameToCollect);
for ( i=0; i<GPU_N; i++ ) { // For every device,
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CUPTI_CALL(cuptiSetEventCollectionMode(ctx[i],
CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS));
CUPTI_CALL( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ));
CUPTI_CALL( cuptiEventGroupAddEvent(eg[i], eventId));
CUPTI_CALL( cuptiEventGroupEnable( eg[i] ));
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])),
"cuCtxPopCurrent" );
} // end of devices.
} // end of if metric/event.
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
uint64_t ns_timeDuration; // cuda device time elapsed.
uint64_t startTimestamp, endTimestamp;
CUPTI_CALL(cuptiGetTimestamp(&startTimestamp)); // We need time in ns for metrics.
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++) {
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ));
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Copy input data from CPU
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), hipMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
hipLaunchKernelGGL(( reduceKernel) , dim3(BLOCK_N), dim3(THREAD_N), 0, plan[i].stream , plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( hipGetLastError() != hipSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( hipMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), hipMemcpyDeviceToHost, plan[i].stream ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( hipSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Wait for all operations to finish
hipStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
CUPTI_CALL(cuptiGetTimestamp(&endTimestamp));
ns_timeDuration = endTimestamp - startTimestamp;
double gpuTime = (ns_timeDuration/((double) 1000000.0)); // convert to ms.
// Now, we must read the metric/event.
size_t size = 1024;
uint64_t buffer[size];
for ( i=0; i<GPU_N; i++ ) { // for each device,
CHECK_CUDA_ERROR( hipSetDevice( i ) ); // point at it.
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( hipCtxSynchronize( ), "hipCtxSynchronize" ); // wait for all to finish.
if (isMetric) { // If we have a metric,
CUpti_MetricValue metricValue;
readMetricValue(eg[i], numMetricEvents[i],
device[i], &metricId,
ns_timeDuration, &metricValue);
printf("Device %i, Metric: ",i); // prefix the printing...
printMetricValue(metricId, metricValue, NameToCollect); // Print "name = value\n".
} else { // If we have just an event.
readEventGroup(eg[i], device[i],
1, &eventId, // just 1 event.
&buffer[i]);
printf( "CUPTI %s device %d counterValue %u (on one domain, "
"may need to be multiplied by num of domains)\n",
NameToCollect, i, buffer[i] );
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
if (gpuTime > 0) {
printf( " CPU Processing time: %f (ms) (speedup %.2fX)\n", cpuTime, (cpuTime/gpuTime) );
} else {
printf( " CPU Processing time: %f (ms)\n", cpuTime);
}
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( hipSetDevice(i) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( hipStreamSynchronize(plan[i].stream) );
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( hipFree( plan[i].d_Data ) );
CHECK_CUDA_ERROR( hipStreamDestroy( plan[i].stream ) );
CHECK_CUDA_ERROR( hipHostFree( plan[i].h_Data ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
| 6dd638d8d22d18475f0c8e487ccc4dca17d4f596.cu | /*
* This example is taken from the NVIDIA documentation (Copyright 1993-2013
* NVIDIA Corporation) and has been adapted to show the use of CUPTI in
* collecting event counters for multiple GPU contexts.
*
* 'likeComp' does the job the component does: breaking the metric events
* out into a list and then building a group from that list, and trying to
* read it.
*/
/*
* This software contains source code provided by NVIDIA Corporation
*
* According to the Nvidia EULA (compute 5.5 version)
* http://developer.download.nvidia.com/compute/cuda/5_5/rel/docs/EULA.pdf
*
* Chapter 2. NVIDIA CORPORATION CUDA SAMPLES END USER LICENSE AGREEMENT
* 2.1.1. Source Code
* Developer shall have the right to modify and create derivative works with the Source
* Code. Developer shall own any derivative works ("Derivatives") it creates to the Source
* Code, provided that Developer uses the Materials in accordance with the terms and
* conditions of this Agreement. Developer may distribute the Derivatives, provided that
* all NVIDIA copyright notices and trademarks are propagated and used properly and
* the Derivatives include the following statement: “This software contains source code
* provided by NVIDIA Corporation.”
*/
/*
* This application demonstrates how to use the CUDA API to use multiple GPUs,
* with an emphasis on simple illustration of the techniques (not on performance).
*
* Note that in order to detect multiple GPUs in your system you have to disable
* SLI in the nvidia control panel. Otherwise only one GPU is visible to the
* application. On the other side, you can still extend your desktop to screens
* attached to both GPUs.
*/
// System includes
#include <stdio.h>
#include <assert.h>
// CUDA runtime
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cupti.h>
#include <timer.h>
#include "papi.h"
#include "papi_test.h"
#ifndef MAX
#define MAX(a,b) (a > b ? a : b)
#endif
#include "simpleMultiGPU.h"
// //////////////////////////////////////////////////////////////////////////////
// Data configuration
// //////////////////////////////////////////////////////////////////////////////
const int MAX_GPU_COUNT = 32;
const int DATA_N = 48576 * 32;
char *NameToCollect = NULL;
#define CHECK_CU_ERROR(err, cufunc) \
if (err != CUDA_SUCCESS) { printf ("Error %d for CUDA Driver API function '%s'\n", err, cufunc); return -1; }
#define CHECK_CUDA_ERROR(err) \
if (err != cudaSuccess) { printf ("%s:%i Error %d for CUDA [%s]\n", __FILE__, __LINE__, err, cudaGetErrorString(err) ); return -1; }
#define CUPTI_CALL(call) \
do { \
CUptiResult _status = call; \
if (_status != CUPTI_SUCCESS) { \
const char *errstr; \
cuptiGetResultString(_status, &errstr); \
fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \
__FILE__, __LINE__, #call, errstr); \
exit(-1); \
} \
} while (0)
#define CHECK_ALLOC_ERROR(var) \
do { \
if (var == NULL) { \
fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n", \
__FILE__, __LINE__); \
exit(-1); \
} \
} while (0)
// //////////////////////////////////////////////////////////////////////////////
// Simple reduction kernel.
// Refer to the 'reduction' CUDA SDK sample describing
// reduction optimization strategies
// //////////////////////////////////////////////////////////////////////////////
__global__ static void reduceKernel( float *d_Result, float *d_Input, int N )
{
const int tid = blockIdx.x * blockDim.x + threadIdx.x;
const int threadN = gridDim.x * blockDim.x;
float sum = 0;
for( int pos = tid; pos < N; pos += threadN )
sum += d_Input[pos];
d_Result[tid] = sum;
}
static void printUsage() {
printf("usage: Perform a CUPTI only test of an event or metric.\n");
printf(" -help : display help message\n");
printf(" EVENT_NAME : or Metric, must be the LAST argument, after any flags.\n");
printf("Note the PAPI prefix of 'cuda:::event:' or 'cuda:::metric:' should be left off,\n");
printf("also any ':device=n' suffix. Those are PAPI added elements for disambiguation. \n");
}
void parseCommandLineArgs(int argc, char *argv[])
{
if (argc < 2) {
printf("Invalid number of options\n");
printUsage();
exit(0);
}
NameToCollect = argv[1]; // Record name to collect.
} // end routine.
//-----------------------------------------------------------------------------
// Return a text version with B, KB, MB, GB or TB.
//-----------------------------------------------------------------------------
#define DIM(x) (sizeof(x)/sizeof(*(x)))
void calculateSize(char *result, uint64_t size)
{
int i;
const char *sizes[] = { "TB", "GB", "MB", "KB", "B" };
uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL;
uint64_t multiplier = exbibytes;
for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) {
if(size < multiplier)
continue;
sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]);
return;
}
strcpy(result, "0");
return;
} // end routine
//-------------------------------------------------------------------------------------------------
// Returns the values in the event groups. Caller must know the number of events, and eventValues
// must be large enough to hold that many. eventIDArray must be large enough to hold that many
// event IDs.
//-------------------------------------------------------------------------------------------------
void readEventGroup(CUpti_EventGroup eventGroup,
CUdevice dev,
uint32_t numEvents,
CUpti_EventID *eventIdArray,
uint64_t *eventValues) {
size_t bufferSizeBytes, numCountersRead;
size_t eventIdArrayBytes= sizeof(CUpti_EventID) * numEvents;
size_t numTotalInstancesSize = 0;
uint64_t numTotalInstances = 0;
uint32_t i = 0, j = 0;
CUpti_EventDomainID domainId;
size_t domainSize;
domainSize = sizeof(CUpti_EventDomainID);
CUPTI_CALL(cuptiEventGroupGetAttribute(eventGroup,
CUPTI_EVENT_GROUP_ATTR_EVENT_DOMAIN_ID,
&domainSize,
(void *)&domainId));
numTotalInstancesSize = sizeof(uint64_t);
CUPTI_CALL(cuptiDeviceGetEventDomainAttribute(dev,
domainId,
CUPTI_EVENT_DOMAIN_ATTR_TOTAL_INSTANCE_COUNT,
&numTotalInstancesSize,
(void *)&numTotalInstances));
printf("LINE %i, DeviceEventDomainAttribute numTotalInstances=%llu.\n", __LINE__, numTotalInstances);
bufferSizeBytes = sizeof(uint64_t) * numEvents * numTotalInstances;
uint64_t *eventValueArray = (uint64_t *) malloc(bufferSizeBytes);
CHECK_ALLOC_ERROR(eventValueArray);
for (i=0; i<numEvents; i++) eventValues[i]=0; // init the values.
CUPTI_CALL(cuptiEventGroupReadAllEvents(eventGroup,
CUPTI_EVENT_READ_FLAG_NONE,
&bufferSizeBytes,
eventValueArray,
&eventIdArrayBytes,
eventIdArray,
&numCountersRead));
printf("LINE %i, numCountersRead=%u.\n", __LINE__, numCountersRead);
if (numCountersRead != numEvents) {
if (numCountersRead > numEvents) exit(-1);
}
// Arrangement of 2-d Array returned in eventValueArray:
// domain instance 0: event0 event1 ... eventN
// domain instance 1: event0 event1 ... eventN
// ...
// domain instance M: event0 event1 ... eventN
// But we accumulate by column, event[0], event[1], etc.
for (i = 0; i < numEvents; i++) { // outer loop column traversal.
for (j = 0; j < numTotalInstances; j++) { // inner loop row traversal.
eventValues[i] += eventValueArray[i + numEvents * j];
}
}
free(eventValueArray); // Done with this.
} // end routine.
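//-------------------------------------------------------------------------------------------------
// Sketch only (not part of the original sample): one possible way to handle a metric whose
// events span several groups within a single CUpti_EventGroupSet, as mentioned in the note
// below. It reuses readEventGroup() above and appends each group's IDs/values into the
// combined arrays that cuptiMetricGetValue() expects. The helper name and the assumption
// that the caller sized allIds/allValues for the metric's full event list are hypothetical.
//-------------------------------------------------------------------------------------------------
static void readEventGroupSet(CUpti_EventGroupSet *set, CUdevice dev,
CUpti_EventID *allIds, uint64_t *allValues, uint32_t *numCollected)
{
uint32_t g, groupEvents;
size_t attrSize;
*numCollected = 0;
for (g = 0; g < set->numEventGroups; g++) {
CUpti_EventGroup grp = set->eventGroups[g];
groupEvents = 0;
attrSize = sizeof(groupEvents);
CUPTI_CALL(cuptiEventGroupGetAttribute(grp,
CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&attrSize, &groupEvents));
// Append this group's events after the ones collected so far.
readEventGroup(grp, dev, groupEvents,
allIds + *numCollected, allValues + *numCollected);
*numCollected += groupEvents;
}
} // end sketch.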
//-------------------------------------------------------------------------------------------------
// For reading a metric. This still requires a group of events.
// This cannot read a metric that requires more than one group; if you need that, we need to pass
// a set instead, and loop through the groups in the set, and accumulate a table of the collected
// events. TC
//-------------------------------------------------------------------------------------------------
void readMetricValue(CUpti_EventGroup eventGroup, uint32_t numEvents,
CUdevice dev, CUpti_MetricID *metricId,
uint64_t ns_timeDuration,
CUpti_MetricValue *metricValue) {
int i;
uint64_t *eventValues = NULL;
CUpti_EventID *eventIDs;
size_t eventValuesSize = sizeof(uint64_t) * numEvents;
size_t eventIDsSize = sizeof(CUpti_EventID) * numEvents;
eventValues = (uint64_t *) malloc(eventValuesSize);
CHECK_ALLOC_ERROR(eventValues);
eventIDs = (CUpti_EventID *) malloc(eventIDsSize);
CHECK_ALLOC_ERROR(eventIDs);
readEventGroup(eventGroup, dev, numEvents, eventIDs, eventValues); // Read the event group.
for (i=0; i<numEvents; i++) {
printf(" readMetricValue: EventID %lu=read %lu.\n", eventIDs[i], eventValues[i]);
}
CUPTI_CALL(cuptiMetricGetValue(dev, metricId[0],
eventIDsSize, eventIDs,
eventValuesSize, eventValues,
ns_timeDuration, metricValue));
free(eventValues);
free(eventIDs);
} // end routine.
// Print metric value, we format based on the value kind
int printMetricValue(CUpti_MetricID metricId, CUpti_MetricValue metricValue,
const char *metricName) {
CUpti_MetricValueKind valueKind;
char str[64];
size_t valueKindSize = sizeof(valueKind);
CUPTI_CALL(cuptiMetricGetAttribute(metricId, CUPTI_METRIC_ATTR_VALUE_KIND,
&valueKindSize, &valueKind));
switch (valueKind) {
case CUPTI_METRIC_VALUE_KIND_DOUBLE:
printf("%s = %f\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_UINT64:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueUint64);
printf("%s\n", str);
break;
case CUPTI_METRIC_VALUE_KIND_INT64:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueInt64);
printf("%s\n", str);
break;
case CUPTI_METRIC_VALUE_KIND_PERCENT:
printf("%s = %.2f%%\n", metricName, metricValue.metricValueDouble);
break;
case CUPTI_METRIC_VALUE_KIND_THROUGHPUT:
printf("%s = ", metricName);
calculateSize(str, (uint64_t)metricValue.metricValueThroughput);
printf("%s\n", str);
break;
default:
fflush(stdout);
fprintf(stderr, "error: unknown value kind = %li\n", valueKind);
return -1; // indicate failure.
}
return 0; // indicate success.
} // end routine.
// //////////////////////////////////////////////////////////////////////////////
// Program main
// //////////////////////////////////////////////////////////////////////////////
int main( int argc, char **argv )
{
// Solver config
TGPUplan plan[MAX_GPU_COUNT];
// GPU reduction results
float h_SumGPU[MAX_GPU_COUNT];
float sumGPU;
double sumCPU, diff;
int i, j, gpuBase, GPU_N;
const int BLOCK_N = 32;
const int THREAD_N = 256;
const int ACCUM_N = BLOCK_N * THREAD_N;
CUcontext ctx[MAX_GPU_COUNT];
printf( "Starting cudaTest_cupti_only.\n" );
// Parse command line arguments
parseCommandLineArgs(argc, argv);
// Report on the available CUDA devices
int computeCapabilityMajor = 0, computeCapabilityMinor = 0;
int runtimeVersion = 0, driverVersion = 0;
char deviceName[64];
CUdevice device[MAX_GPU_COUNT];
CHECK_CUDA_ERROR( cudaGetDeviceCount( &GPU_N ) );
if( GPU_N > MAX_GPU_COUNT ) GPU_N = MAX_GPU_COUNT;
printf( "CUDA-capable device count: %i\n", GPU_N );
for ( i=0; i<GPU_N; i++ ) {
CHECK_CU_ERROR( cuDeviceGet( &device[i], i ), "cuDeviceGet" );
CHECK_CU_ERROR( cuDeviceGetName( deviceName, 64, device[i] ), "cuDeviceGetName" );
CHECK_CU_ERROR( cuDeviceGetAttribute( &computeCapabilityMajor,
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR, device[i]), "cuDeviceGetAttribute");
CHECK_CU_ERROR( cuDeviceGetAttribute( &computeCapabilityMinor,
CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR, device[i]), "cuDeviceGetAttribute");
cudaRuntimeGetVersion( &runtimeVersion );
cudaDriverGetVersion( &driverVersion );
printf( "CUDA Device %d: %s : computeCapability %d.%d runtimeVersion %d.%d driverVersion %d.%d\n",
i, deviceName, computeCapabilityMajor, computeCapabilityMinor,
runtimeVersion/1000, (runtimeVersion%100)/10, driverVersion/1000, (driverVersion%100)/10 );
if ( computeCapabilityMajor < 2 ) {
printf( "CUDA Device %d compute capability is too low... will not add any more GPUs\n", i );
GPU_N = i;
break;
}
} // end for each device.
uint32_t cupti_linked_version;
cuptiGetVersion( &cupti_linked_version );
printf("CUPTI version: Compiled against version %d; Linked against version %d\n",
CUPTI_API_VERSION, cupti_linked_version );
// create one context per device
for (i = 0; i < GPU_N; i++) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR( cuCtxCreate( &(ctx[i]), 0, device[i] ), "cuCtxCreate" );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
printf("Searching for '%s'.\n", NameToCollect);
CUptiResult myCURes;
CUpti_EventID eventId;
CUpti_MetricID metricId;
CUpti_MetricValueKind metricKind;
size_t metricKindSize = sizeof(CUpti_MetricValueKind);
uint32_t numSubs; // Number of sub-events in Metric.
int isMetric = 0; // Presume this is not a metric.
int numEventGroups = 0;
int numMetricEvents[MAX_GPU_COUNT]={0};
size_t sizeInt = sizeof(int);
myCURes = cuptiEventGetIdFromName(0, NameToCollect, &eventId);
if (myCURes == CUPTI_SUCCESS) {
printf("Found '%s' as an event.\n", NameToCollect);
} else {
myCURes = cuptiMetricGetIdFromName(0, NameToCollect, &metricId);
if (myCURes == CUPTI_SUCCESS) {
isMetric = 1; // remember we found a metric.
printf("Found '%s' as a metric.\n", NameToCollect);
} else {
printf("'%s' not found, as event or as metric.\n", NameToCollect);
exit(-1);
}
}
printf( "Generating input data...\n" );
// Subdividing input data across GPUs
// Get data sizes for each GPU
for( i = 0; i < GPU_N; i++ )
plan[i].dataN = DATA_N / GPU_N;
// Take into account "odd" data sizes
for( i = 0; i < DATA_N % GPU_N; i++ )
plan[i].dataN++;
// Assign data ranges to GPUs
gpuBase = 0;
for( i = 0; i < GPU_N; i++ ) {
plan[i].h_Sum = h_SumGPU + i; // point within h_SumGPU array
gpuBase += plan[i].dataN;
}
// Create streams for issuing GPU command asynchronously and allocate memory (GPU and System page-locked)
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( cudaStreamCreate( &plan[i].stream ) );
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Data, plan[i].dataN * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMalloc( ( void ** ) &plan[i].d_Sum, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Sum_from_device, ACCUM_N * sizeof( float ) ) );
CHECK_CUDA_ERROR( cudaMallocHost( ( void ** ) &plan[i].h_Data, plan[i].dataN * sizeof( float ) ) );
for( j = 0; j < plan[i].dataN; j++ ) {
plan[i].h_Data[j] = ( float ) rand() / ( float ) RAND_MAX;
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Create the group(s) needed to read the metric or event.
CUpti_EventGroup eg[MAX_GPU_COUNT]; // event group only.
CUpti_EventGroupSets* egs[MAX_GPU_COUNT]; // need event group sets for metric.
if (isMetric) { // If it is a metric, need a set.
printf("Setup CUPTI counters internally for metric '%s'.\n", NameToCollect);
for ( i=0; i<GPU_N; i++ ) { // For every device,
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CUPTI_CALL(cuptiSetEventCollectionMode(ctx[i],
CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS)); // note: CONTINUOUS vs. KERNEL made no difference in result.
// Here is where the change occurs. We have metricId.
// First, get number of events.
CUPTI_CALL(cuptiMetricGetNumEvents(metricId, &numSubs)); // Get number of events needed for metric.
size_t sizeBytes = numSubs * sizeof(CUpti_EventID); // bytes needed to store events.
CUpti_EventID *subEventIds = (CUpti_EventID*) malloc(sizeBytes); // Get the space for them.
CUPTI_CALL(cuptiMetricEnumEvents(metricId, &sizeBytes, subEventIds)); // Collect the events.
for (j=0; j<numSubs; j++) printf("Metric subEvent %i: %u\n", j, subEventIds[j]); // CUpti_EventID is a 32-bit value.
CUPTI_CALL(cuptiMetricGetAttribute( // Get the kind.
metricId,
CUPTI_METRIC_ATTR_VALUE_KIND,
&metricKindSize, &metricKind));
printf("Metric value kind = %i.\n", metricKind);
CUPTI_CALL(cuptiEventGroupSetsCreate( // create event group sets.
ctx[i],
sizeBytes, subEventIds,
&egs[i]));
// The proper way to do it.
// CUPTI_CALL(cuptiMetricCreateEventGroupSets(ctx[i],
// sizeof(CUpti_MetricID), &metricId, &egs[i])); // Get the pointer to sets.
printf("Metric device %i requires %i sets.\n", i, egs[i]->numSets);
if (egs[i]->numSets > 1) {
printf("'%s' requires multiple application runs to complete. Aborting.\n", NameToCollect);
exit(-1);
}
numEventGroups = egs[i]->sets[0].numEventGroups; // collect groups in only set.
if (numEventGroups > 1) {
printf("'%s' requires multiple groups to complete metric. Aborting.\n", NameToCollect);
exit(-1);
}
// DEBUG note: This has to change to support metrics with multiple
// groups, if we ever see them. can't use eg[i], for example,
// you'd need a different one on each GPU. Tony C.
for (j=0; j<numEventGroups; j++) {
uint32_t one = 1;
eg[i] = egs[i]->sets[0].eventGroups[j]; // Copy the group.
CUPTI_CALL(cuptiEventGroupSetAttribute(eg[i],
CUPTI_EVENT_GROUP_ATTR_PROFILE_ALL_DOMAIN_INSTANCES,
sizeof(uint32_t), &one));
CUPTI_CALL(cuptiEventGroupGetAttribute(
eg[i], CUPTI_EVENT_GROUP_ATTR_NUM_EVENTS,
&sizeInt, &numMetricEvents[i])); // read # of events on this device.
printf("Group %i has %i events.\n", j+1, numMetricEvents[i]);
size_t subSize = numMetricEvents[i] * sizeof(CUpti_EventID); // size in bytes.
CUpti_EventID *subEvents = (CUpti_EventID*) malloc(subSize);
CUPTI_CALL( cuptiMetricEnumEvents(metricId, &subSize, subEvents));
int k;
for (k=0; k<numMetricEvents[i]; k++) {
printf(" Group %i event %i ID=%u\n", j+1, k, subEvents[k]); // CUpti_EventID is a 32-bit value.
}
free(subEvents); // free memory used for diagnostic.
}
CUPTI_CALL(cuptiEventGroupSetEnable(&egs[i]->sets[0])); // Enable all groups in set.
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])),
"cuCtxPopCurrent" );
} // end of devices.
} else { // If it is an event, just need one group.
printf("Setup CUPTI counters internally for event '%s' (CUPTI_ONLY)\n", NameToCollect);
for ( i=0; i<GPU_N; i++ ) { // For every device,
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CUPTI_CALL(cuptiSetEventCollectionMode(ctx[i],
CUPTI_EVENT_COLLECTION_MODE_CONTINUOUS));
CUPTI_CALL( cuptiEventGroupCreate( ctx[i], &eg[i], 0 ));
CUPTI_CALL( cuptiEventGroupAddEvent(eg[i], eventId));
CUPTI_CALL( cuptiEventGroupEnable( eg[i] ));
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])),
"cuCtxPopCurrent" );
} // end of devices.
} // end of if metric/event.
// Start timing and compute on GPU(s)
printf( "Computing with %d GPUs...\n", GPU_N );
uint64_t ns_timeDuration; // cuda device time elapsed.
uint64_t startTimestamp, endTimestamp;
CUPTI_CALL(cuptiGetTimestamp(&startTimestamp)); // We need time in ns for metrics.
// Copy data to GPU, launch the kernel and copy data back. All asynchronously
for (i = 0; i < GPU_N; i++) {
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ));
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Copy input data from CPU
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].d_Data, plan[i].h_Data, plan[i].dataN * sizeof( float ), cudaMemcpyHostToDevice, plan[i].stream ) );
// Perform GPU computations
reduceKernel <<< BLOCK_N, THREAD_N, 0, plan[i].stream >>> ( plan[i].d_Sum, plan[i].d_Data, plan[i].dataN );
if ( cudaGetLastError() != cudaSuccess ) { printf( "reduceKernel() execution failed (GPU %d).\n", i ); exit(EXIT_FAILURE); }
// Read back GPU results
CHECK_CUDA_ERROR( cudaMemcpyAsync( plan[i].h_Sum_from_device, plan[i].d_Sum, ACCUM_N * sizeof( float ), cudaMemcpyDeviceToHost, plan[i].stream ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
// Process GPU results
printf( "Process GPU results on %d GPUs...\n", GPU_N );
for( i = 0; i < GPU_N; i++ ) {
float sum;
// Set device
CHECK_CUDA_ERROR( cudaSetDevice( i ) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
// Wait for all operations to finish
cudaStreamSynchronize( plan[i].stream );
// Finalize GPU reduction for current subvector
sum = 0;
for( j = 0; j < ACCUM_N; j++ ) {
sum += plan[i].h_Sum_from_device[j];
}
*( plan[i].h_Sum ) = ( float ) sum;
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
CUPTI_CALL(cuptiGetTimestamp(&endTimestamp));
ns_timeDuration = endTimestamp - startTimestamp;
double gpuTime = (ns_timeDuration/((double) 1000000.0)); // convert to ms.
// Now, we must read the metric/event.
uint64_t buffer[MAX_GPU_COUNT]; // one 64-bit counter value per device
for ( i=0; i<GPU_N; i++ ) { // for each device,
CHECK_CUDA_ERROR( cudaSetDevice( i ) ); // point at it.
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CU_ERROR( cuCtxSynchronize( ), "cuCtxSynchronize" ); // wait for all to finish.
if (isMetric) { // If we have a metric,
CUpti_MetricValue metricValue;
readMetricValue(eg[i], numMetricEvents[i],
device[i], &metricId,
ns_timeDuration, &metricValue);
printf("Device %i, Metric: ",i); // prefix the printing...
printMetricValue(metricId, metricValue, NameToCollect); // Print "name = value\n".
} else { // If we have just an event.
readEventGroup(eg[i], device[i],
1, &eventId, // just 1 event.
&buffer[i]);
printf( "CUPTI %s device %d counterValue %u (on one domain, "
"may need to be multiplied by num of domains)\n",
NameToCollect, i, buffer[i] );
}
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
sumGPU = 0;
for( i = 0; i < GPU_N; i++ ) {
sumGPU += h_SumGPU[i];
}
printf( " GPU Processing time: %f (ms)\n", gpuTime );
// Compute on Host CPU
printf( "Computing the same result with Host CPU...\n" );
StartTimer();
sumCPU = 0;
for( i = 0; i < GPU_N; i++ ) {
for( j = 0; j < plan[i].dataN; j++ ) {
sumCPU += plan[i].h_Data[j];
}
}
double cpuTime = GetTimer();
if (gpuTime > 0) {
printf( " CPU Processing time: %f (ms) (speedup %.2fX)\n", cpuTime, (cpuTime/gpuTime) );
} else {
printf( " CPU Processing time: %f (ms)\n", cpuTime);
}
// Compare GPU and CPU results
printf( "Comparing GPU and Host CPU results...\n" );
diff = fabs( sumCPU - sumGPU ) / fabs( sumCPU );
printf( " GPU sum: %f\n CPU sum: %f\n", sumGPU, sumCPU );
printf( " Relative difference: %E \n", diff );
// Cleanup and shutdown
for( i = 0; i < GPU_N; i++ ) {
CHECK_CUDA_ERROR( cudaSetDevice(i) );
CHECK_CU_ERROR(cuCtxPushCurrent(ctx[i]), "cuCtxPushCurrent");
CHECK_CUDA_ERROR( cudaStreamSynchronize(plan[i].stream) );
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Sum_from_device ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Sum ) );
CHECK_CUDA_ERROR( cudaFree( plan[i].d_Data ) );
CHECK_CUDA_ERROR( cudaStreamDestroy( plan[i].stream ) );
CHECK_CUDA_ERROR( cudaFreeHost( plan[i].h_Data ) );
CHECK_CU_ERROR( cuCtxPopCurrent(&(ctx[i])), "cuCtxPopCurrent" );
}
exit( ( diff < 1e-5 ) ? EXIT_SUCCESS : EXIT_FAILURE );
}
|
48c30ee2785fb61162fccf4702123b1a1596951b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/sequence2batch.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index,
int64_t height, int64_t width,
bool is_src_index) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * GridDimX;
while (id < height) {
int src_idx = is_src_index ? index[id] : id;
int dst_idx = is_src_index ? id : index[id];
const T* src_data = src + src_idx * width;
T* dst_data = dst + dst_idx * width;
for (int i = idx; i < width; i += BlockDimX) {
dst_data[i] = src_data[i];
}
id += BlockDimY * GridDimX;
}
}
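// (Note on the indexing above, given the threads(128, 8) / grid(8, 1) launch and GridDimX = 8 used by the
// functor below: blockIdx.x and threadIdx.y together cover starting rows 0..63, each striding by
// BlockDimY * GridDimX = 64 rows, while the 128 x-threads copy one row's columns in steps of BlockDimX.)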
template <typename T>
class CopyMatrixRowsFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& src, const size_t* index,
framework::Tensor& dst, bool is_src_index) {
auto src_dims = src.dims();
auto dst_dims = dst.dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2,
"The src must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
"The dst must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1],
"The width of src and dst must be same.");
auto height = dst_dims[0];
auto width = dst_dims[1];
auto* src_data = src.data<T>();
auto* dst_data = dst.data<T>();
dim3 threads(128, 8);
dim3 grid(8, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
hipLaunchKernelGGL(( CopyMatrixRowsKernel<T, 128, 8, 8>), dim3(grid), dim3(threads), 0, stream,
src_data, dst_data, index, height, width, is_src_index);
}
};
template class CopyMatrixRowsFunctor<platform::GPUPlace, float>;
template class CopyMatrixRowsFunctor<platform::GPUPlace, double>;
template class LoDTensor2BatchFunctor<platform::GPUPlace, float>;
template class LoDTensor2BatchFunctor<platform::GPUPlace, double>;
template class Batch2LoDTensorFunctor<platform::GPUPlace, float>;
template class Batch2LoDTensorFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| 48c30ee2785fb61162fccf4702123b1a1596951b.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserve.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/operators/math/sequence2batch.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T, int BlockDimX, int BlockDimY, int GridDimX>
__global__ void CopyMatrixRowsKernel(const T* src, T* dst, const size_t* index,
int64_t height, int64_t width,
bool is_src_index) {
int idx = threadIdx.x;
int idy = threadIdx.y;
int id = blockIdx.x + idy * GridDimX;
while (id < height) {
int src_idx = is_src_index ? index[id] : id;
int dst_idx = is_src_index ? id : index[id];
const T* src_data = src + src_idx * width;
T* dst_data = dst + dst_idx * width;
for (int i = idx; i < width; i += BlockDimX) {
dst_data[i] = src_data[i];
}
id += BlockDimY * GridDimX;
}
}
template <typename T>
class CopyMatrixRowsFunctor<platform::GPUPlace, T> {
public:
void operator()(const platform::DeviceContext& context,
const framework::Tensor& src, const size_t* index,
framework::Tensor& dst, bool is_src_index) {
auto src_dims = src.dims();
auto dst_dims = dst.dims();
PADDLE_ENFORCE_EQ(src_dims.size(), 2,
"The src must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(dst_dims.size(), 2,
"The dst must be matrix with rank 2.");
PADDLE_ENFORCE_EQ(src_dims[1], dst_dims[1],
"The width of src and dst must be same.");
auto height = dst_dims[0];
auto width = dst_dims[1];
auto* src_data = src.data<T>();
auto* dst_data = dst.data<T>();
dim3 threads(128, 8);
dim3 grid(8, 1);
auto stream =
reinterpret_cast<const platform::CUDADeviceContext&>(context).stream();
CopyMatrixRowsKernel<T, 128, 8, 8><<<grid, threads, 0, stream>>>(
src_data, dst_data, index, height, width, is_src_index);
}
};
template class CopyMatrixRowsFunctor<platform::GPUPlace, float>;
template class CopyMatrixRowsFunctor<platform::GPUPlace, double>;
template class LoDTensor2BatchFunctor<platform::GPUPlace, float>;
template class LoDTensor2BatchFunctor<platform::GPUPlace, double>;
template class Batch2LoDTensorFunctor<platform::GPUPlace, float>;
template class Batch2LoDTensorFunctor<platform::GPUPlace, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
c25a5f646913a2c633f7a12b950899f4be66c538.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "hip/hip_runtime.h"
#define CEIL(a,b) (((a)+(b)-1)/(b))
#define SWAP(a,b,t) t=b; b=a; a=t;
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define MAXTHGAUSSKN4 128
#define MAXTHGAUSSKN5 128
#define MAXTHGAUSSKN67 1024
#define MAXTHGAUSSKN8 256
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
struct ImgProp{
ui Hpixels;
ui Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
__global__
void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R + G + B) / 3.0;
}
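// (Illustrative note, hypothetical width: for Hpixels = 1022 and ThrPerBlk = 256, BlkPerRow = CEIL(1022,256) = 4
// and RowBytes = (1022*3 + 3) & (~3) = 3068, i.e. the 3066 pixel bytes of a BMP row padded up to the next
// multiple of 4.)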
// Improved BWKernel. Uses pre-computed values and 2D blocks.
__global__
void BWKernel2(double *ImgBW, uch *ImgGPU, ui Hpixels, ui RowBytes)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
// ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
//ui NumBlocks = gridDim.x;
// ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
// ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R + G + B) / 3.0;
}
// Improved BWKernel2. Calculates 4 pixels (3 int's) at a time
__global__
void BWKernel3(double *ImgBW, ui *ImgGPU32, ui Hpixels, ui RowInts)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui A, B, C;
ui Pix1, Pix2, Pix3, Pix4;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
ui MYcolIndex = MYcol*3;
if (MYcolIndex >= RowInts) return; // col out of range
ui MYoffset = MYrow * RowInts;
ui MYsrcIndex = MYoffset + MYcolIndex;
ui MYpixAddr = MYrow * Hpixels + MYcol*4;
A = ImgGPU32[MYsrcIndex]; // A=[B1,R0,G0,B0]
B = ImgGPU32[MYsrcIndex+1]; // B=[G2,B2,R1,G1]
C = ImgGPU32[MYsrcIndex+2]; // C=[R3,G3,B3,R2]
// Pix1 = R0+G0+B0;
Pix1 = (A & 0x000000FF) + ((A >> 8) & 0x000000FF) + ((A >> 16) & 0x000000FF);
// Pix2 = R1+G1+B1;
Pix2 = ((A >> 24) & 0x000000FF) + (B & 0x000000FF) + ((B >> 8) & 0x000000FF);
// Pix3 = R2+G2+B2;
Pix3 = (C & 0x000000FF) + ((B >> 16) & 0x000000FF) + ((B >> 24) & 0x000000FF);
// Pix4 = R3+G3+B3;
Pix4 = ((C >> 8) & 0x000000FF) + ((C >> 16) & 0x000000FF) + ((C >> 24) & 0x000000FF);
ImgBW[MYpixAddr] = (double)Pix1 * 0.33333333;
ImgBW[MYpixAddr + 1] = (double)Pix2 * 0.33333333;
ImgBW[MYpixAddr + 2] = (double)Pix3 * 0.33333333;
ImgBW[MYpixAddr + 3] = (double)Pix4 * 0.33333333;
}
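// (Illustrative note, hypothetical word: BMP rows are little-endian BGR, so if A = 0x40302010 then
// A & 0xFF = 0x10 is B0, (A >> 8) & 0xFF = 0x20 is G0, (A >> 16) & 0xFF = 0x30 is R0, and
// (A >> 24) & 0xFF = 0x40 is B1, matching the A=[B1,R0,G0,B0] layout noted in BWKernel3 above.)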
__device__
double Gauss[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
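// The coefficients above sum to 159 (row sums 17 + 38 + 49 + 38 + 17), which is why the Gauss kernels
// below divide the accumulated value by 159.00 to keep the filter normalized.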
// Kernel that calculates a Gauss image from the B&W image (one pixel)
// resulting image has a double type for each pixel position
__global__
void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double G=0.00;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G/159.00;
}
}
// Improved GaussKernel. Uses 2D blocks. Each kernel processes a single pixel
__global__
void GaussKernel2(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G = 0.00;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
__constant__
double GaussC[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Improved GaussKernel2. Uses constant memory to store filter coefficients
__global__
void GaussKernel3(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * GaussC[i + 2][j + 2]); // use constant memory
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
// Improved GaussKernel3. Reads multiple (5) rows into shared memory.
// Each thread computes 1 pixel.
__global__
void GaussKernel4(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 horizontal, 5 vertical neighbors stored in Shared Memory
__shared__ double Neighbors[MAXTHGAUSSKN4][5][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
// Read from GM to Shared Memory
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
row = MYrow + i - 2;
col = MYcol + j - 2;
indx = row * Hpixels + col;
Neighbors[MYtid][i][j] = ImgBW[indx];
}
//__syncthreads();
}
__syncthreads();
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid][i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
// Improved GaussKernel3. Reads multiple (5) rows into shared memory.
// Each thread computes 4 pixels. Horizontal resolution must be a multiple of 4.
__global__
void GaussKernel5(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 8 horizontal, 5 vertical neighbors
__shared__ double Neighbors[MAXTHGAUSSKN5][5][8];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j, k;
double G;
ui MYrow = blockIdx.y;
ui MYcol = (MYbid*ThrPerBlk + MYtid) * 4;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow < 2) || (MYrow > Vpixels - 3)){ // Top and bottom two rows
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex+1] = 0.0;
ImgGauss[MYpixIndex+2] = 0.0;
ImgGauss[MYpixIndex+3] = 0.0;
return;
}
if (MYcol > Hpixels - 3) { // Rightmost two columns
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex + 1] = 0.0;
return;
}
if (MYcol < 2) { // Leftmost two columns
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex + 1] = 0.0;
return;
}
MYpixIndex += 2; // Process 2 pix. shifted
MYcol += 2;
// Read from GM to Shared Memory
for (i = 0; i < 5; i++){
for (j = 0; j < 8; j++){
row = MYrow + i - 2;
col = MYcol + j - 2;
indx = row * Hpixels + col;
Neighbors[MYtid][i][j] = ImgBW[indx];
}
}
__syncthreads();
for (k = 0; k < 4; k++){
G = 0.000;
for (i = 0; i < 5; i++){
for (j = 0; j < 5; j++){
G += (Neighbors[MYtid][i][j+k] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex+k] = G / 159.00;
}
}
// Improved GaussKernel4. Each thread computes 1 pixel.
// Each thread reads 5 pixels into Shared Memory.
// Pixel at the same column, but 5 different rows (row-2 ... row+2)
__global__
void GaussKernel6(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 vertical neighbors for each pixel that is represented by a thread
__shared__ double Neighbors[MAXTHGAUSSKN67+4][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
ui IsEdgeThread=(MYtid==(ThrPerBlk-1));
// Read from GM to Shared Memory
// Each thread will read a single pixel
indx = MYpixIndex-2*Hpixels-2; // start 2 rows above & 2 columns left
if (!IsEdgeThread) {
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}
}else{
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
Neighbors[MYtid + 1][j] = ImgBW[indx + 1];
Neighbors[MYtid + 2][j] = ImgBW[indx + 2];
Neighbors[MYtid + 3][j] = ImgBW[indx + 3];
Neighbors[MYtid + 4][j] = ImgBW[indx + 4];
indx += Hpixels; // Next iteration will read next row, same column
}
}
__syncthreads();
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid+i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
// Improved GaussKernel6. Each block computes ThrPerBlk-4 pixels.
// This eliminates the need to make exceptions for the "Edge" thread
__global__
void GaussKernel7(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 vertical neighbors for each pixel (read by each thread)
__shared__ double Neighbors[MAXTHGAUSSKN67][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*(ThrPerBlk-4) + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
// Read from GM to Shared Memory.
// Each thread will read a single pixel, for 5 neighboring rows
// Each block reads ThrPerBlk pixels starting at (2 left) location
indx = MYpixIndex - 2 * Hpixels - 2; // start 2 rows above & 2 columns left
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}
__syncthreads();
if (MYtid >= ThrPerBlk - 4) return; // Each block computes only ThrPerBlk-4 pixels
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid + i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
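// (Note on the ThrPerBlk-4 figure: the 5x5 filter needs a 2-column halo on each side, so a block that loads
// ThrPerBlk consecutive columns into shared memory can only produce ThrPerBlk-4 output columns; main() sizes
// the grid for this kernel with BlkPerRowG = CEIL(IPH, ThrPerBlk-4) blocks per row accordingly.)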
// Improved GaussKernel7. Each block reads 12 rows.
// Each thread computes 8 vertical pixels.
__global__
void GaussKernel8(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 12 vertical neighbors are saved in the Shared Memory
// These are used to compute 8 vertical pixels by each thread
// Reads from 2 top and 2 bottom pixels are wasted.
__shared__ double Neighbors[MAXTHGAUSSKN8][12];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j, row;
double G[8] = { 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 };
ui MYrow = blockIdx.y*8;
ui isLastBlockY = (blockIdx.y == (gridDim.y - 1)); // last row of blocks in the grid
ui MYcol = MYbid*(ThrPerBlk - 4) + MYtid;
if (MYcol >= Hpixels) return; // col out of range
if (MYrow >= Vpixels) return; // row out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0; // first and last 2 columns
return;
}
if (MYrow == 0) {
ImgGauss[MYpixIndex] = 0.0; // row0
ImgGauss[MYpixIndex+Hpixels] = 0.0; // row1
}
if (isLastBlockY) {
indx = (Vpixels - 2)*Hpixels + MYcol;
ImgGauss[indx] = 0.0; // last row-1
ImgGauss[indx + Hpixels] = 0.0; // last row
}
// Read from GM to Shared Memory.
// Each thread will read a single pixel, for 12 neighboring rows
// Each thread reads 12 pixels, but will only compute 8
indx = MYpixIndex;
for (j = 0; j < 12; j++) {
if ((MYrow+j) < Vpixels) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}else{
Neighbors[MYtid][j] = 0.00;
}
}
__syncthreads();
if (MYtid >= ThrPerBlk - 4) return; // Each block computes only ThrPerBlk-4 pixels
for (row = 0; row < 8; row++) {
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G[row] += (Neighbors[MYtid + i][row+j] * GaussC[i][j]);
}
}
}
// Write all computed pixels back to GM
for (j = 0; j < 8; j++) {
ImgGauss[MYpixIndex] = G[j] / 159.00;
MYpixIndex += Hpixels;
}
}
__device__
double Gx[3][3] = { { -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
{ 0, 0, 0 },
{ 1, 2, 1 } };
// Kernel that calculates Gradient, Theta from the Gauss image
// resulting image has a double type for each pixel position
__global__
void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double GX,GY;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgGrad[MYpixIndex] = 0.0;
ImgTheta[MYpixIndex] = 0.0;
return;
}else{
GX = 0.0; GY = 0.0;
for (i = -1; i <= 1; i++){
for (j = -1; j <= 1; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
GX += (ImgGauss[indx] * Gx[i + 1][j + 1]);
GY += (ImgGauss[indx] * Gy[i + 1][j + 1]);
}
}
ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI;
}
}
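// (Note: ImgGrad holds the gradient magnitude sqrt(GX*GX + GY*GY) and ImgTheta the edge angle in degrees;
// ThresholdKernel below bins that angle into four direction ranges (+/-22.5, +/-67.5) and applies the
// ThreshLo/ThreshHi hysteresis-style test along the chosen direction.)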
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB for each pixel, same RGB for each pixel
__global__
void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
unsigned char PIXVAL;
double L, H, G, T;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgResult[MYresultIndex] = NOEDGE;
ImgResult[MYresultIndex + 1] = NOEDGE;
ImgResult[MYresultIndex + 2] = NOEDGE;
return;
}else{
L = (double)ThreshLo; H = (double)ThreshHi;
G = ImgGrad[MYpixIndex];
PIXVAL = NOEDGE;
if (G <= L){ // no edge
PIXVAL = NOEDGE;
}else if (G >= H){ // edge
PIXVAL = EDGE;
}else{
T = ImgTheta[MYpixIndex];
if ((T<-67.5) || (T>67.5)){
// Look at left and right: [row][col-1] and [row][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -22.5) && (T <= 22.5)){
// Look at top and bottom: [row-1][col] and [row+1][col]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
}
else if ((T>22.5) && (T <= 67.5)){
// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -67.5) && (T<-22.5)){
// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
}
}
ImgResult[MYresultIndex] = PIXVAL;
ImgResult[MYresultIndex + 1] = PIXVAL;
ImgResult[MYresultIndex + 2] = PIXVAL;
}
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(hipError_t error_id)
{
if (error_id != hipSuccess){
printf("CUDA ERROR :::%\n", hipGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
uch *ReadBMPlin(char* fn)
{
static uch *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
uch HeaderInfo[54];
fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
//save header for re-use
memcpy(ip.HeaderInfo, HeaderInfo,54);
printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
ip.Hpixels, ip.Vpixels, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (uch *)malloc(IMAGESIZE);
if (Img == NULL) return Img; // Cannot allocate memory
// read the image from disk
fread(Img, sizeof(uch), IMAGESIZE, f);
fclose(f);
return Img;
}
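// (Note: the offsets 18 and 22 read above are the biWidth and biHeight fields of the standard 54-byte
// BMP header (BITMAPFILEHEADER + BITMAPINFOHEADER); the header is kept in ip.HeaderInfo so WriteBMPlin
// can emit the output file with identical geometry.)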
// Write the 1D linear-memory stored image into file.
void WriteBMPlin(uch *Img, char* fn)
{
FILE* f = fopen(fn, "wb");
if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
//write header
fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
//write data
fwrite(Img, sizeof(uch), IMAGESIZE, f);
printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
fclose(f);
}
int main(int argc, char **argv)
{
// GPU code run times
float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU;
float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold;
hipError_t cudaStatus;
hipEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, BlkPerRowG, ThrPerBlk=256, NumBlocks, NumBlocksG, NumBlocksG8;
ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh, GPUDataTfrKernel, GPUDataTfrTotal;
ui RowBytes, RowInts;
ui *GPUImg32;
hipDeviceProp_t GPUprop;
void *GPUptr; // Pointer to the bulk-allocated GPU memory
ul GPUtotalBufferSize;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
int BWKN=1, GaussKN=1, SobelKN=1, ThresholdKN=1;
char BWKernelName[255], GaussKernelName[255], SobelKernelName[255], ThresholdKernelName[255];
strcpy(ProgName, "imedgeGCM");
switch (argc){
case 10: ThresholdKN = atoi(argv[9]);
case 9: SobelKN = atoi(argv[8]);
case 8: GaussKN = atoi(argv[7]);
case 7: BWKN = atoi(argv[6]);
case 6: ThreshHi = atoi(argv[5]);
case 5: ThreshLo = atoi(argv[4]);
case 4: ThrPerBlk = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi] [BWKernel=1-9] [GaussKernel=1-9] [SobelKernel=1-9] [ThresholdKernel=1-9]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100 1 3 4 5", ProgName);
exit(EXIT_FAILURE);
}
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){
printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n");
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
if ((BWKN < 1) || (BWKN > 9) || (GaussKN < 1) || (GaussKN > 9) || (SobelKN < 1) || (SobelKN > 9) || (ThresholdKN < 1) || (ThresholdKN > 9)) {
printf("Invalid kernel number ... Kernel numbers must be between 1 and 9\n");
if ((BWKN < 1) || (BWKN > 9)) printf("BW Kernel number %d is out of range",BWKN);
if ((GaussKN < 1) || (GaussKN > 9)) printf(" Kernel number %d is out of range", GaussKN);
if ((SobelKN < 1) || (SobelKN > 9)) printf(" Kernel number %d is out of range", SobelKN);
if ((ThresholdKN < 1) || (ThresholdKN > 9)) printf(" Kernel number %d is out of range", ThresholdKN);
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
// Handle special cases
if ((GaussKN == 4) && (ThrPerBlk>MAXTHGAUSSKN4)){
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 4 ... Set to %d.\n", MAXTHGAUSSKN4, MAXTHGAUSSKN4);
ThrPerBlk = MAXTHGAUSSKN4;
}
if ((GaussKN == 5) && (ThrPerBlk>MAXTHGAUSSKN5)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 5 ... Set to %d.\n", MAXTHGAUSSKN5, MAXTHGAUSSKN5);
ThrPerBlk = MAXTHGAUSSKN5;
}
if (( (GaussKN == 6) || (GaussKN == 7)) && (ThrPerBlk>MAXTHGAUSSKN67)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 6 or 7 ... Set to %d.\n", MAXTHGAUSSKN67, MAXTHGAUSSKN67);
ThrPerBlk = MAXTHGAUSSKN67;
}
if ((GaussKN == 8) && (ThrPerBlk>MAXTHGAUSSKN8)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 8 ... Set to %d.\n", MAXTHGAUSSKN8, MAXTHGAUSSKN8);
ThrPerBlk = MAXTHGAUSSKN8;
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
printf("Cannot allocate memory for the input image...\n");
free(TheImg);
exit(EXIT_FAILURE);
}
RowBytes = (IPH * 3 + 3) & (~3);
RowInts = RowBytes / 4;
BlkPerRow = CEIL(IPH, ThrPerBlk);
BlkPerRowG = CEIL(IPH, (ThrPerBlk-4));
NumBlocks = BlkPerRow * IPV;
NumBlocksG = BlkPerRowG * IPV;
NumBlocksG8 = BlkPerRowG * CEIL(IPV, 8);
dim3 dimGrid2D(BlkPerRow, ip.Vpixels);
dim3 dimGrid2D4(CEIL(BlkPerRow, 4), IPV);
dim3 dimGrid2DG(BlkPerRowG, IPV);
dim3 dimGrid2DG8(BlkPerRowG, CEIL(IPV, 8));
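// (Illustrative note, hypothetical 1024x768 input with ThrPerBlk = 256: BlkPerRow = 4, NumBlocks = 3072,
// BlkPerRowG = CEIL(1024,252) = 5, NumBlocksG = 3840, and NumBlocksG8 = 5 * CEIL(768,8) = 480.)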
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
hipGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
goto EXITERROR;
}
cudaStatus = hipSetDevice(0);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto EXITERROR;
}
hipGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui) GPUprop.maxGridSize[2]/1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%lu %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
hipEventCreate(&time1); hipEventCreate(&time2);
hipEventCreate(&time2BW); hipEventCreate(&time2Gauss); hipEventCreate(&time2Sobel);
hipEventCreate(&time3); hipEventCreate(&time4);
hipEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
// Allocate GPU buffer for the input and output images and the imtermediate results
GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
cudaStatus = hipMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMalloc failed! Can't allocate GPU memory\n");
goto EXITERROR;
}
GPUImg = (uch *)GPUptr;
GPUImg32 = (ui *)GPUImg;
GPUResultImg = GPUImg + IMAGESIZE;
GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
GPUGaussImg = GPUBWImg + IMAGEPIX;
GPUGradient = GPUGaussImg + IMAGEPIX;
GPUTheta = GPUGradient + IMAGEPIX;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = hipMemcpy(GPUImg, TheImg, IMAGESIZE, hipMemcpyHostToDevice);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy CPU to GPU failed!\n");
goto EXITCUDAERROR;
}
hipEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
// hipDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
switch (BWKN){
case 1:hipLaunchKernelGGL(( BWKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUBWImg, GPUImg, IPH);
strcpy(BWKernelName, "BWKernel: Everything is passed into the kernel");
break;
case 2:hipLaunchKernelGGL(( BWKernel2) , dim3(dimGrid2D), dim3(ThrPerBlk) , 0, 0, GPUBWImg, GPUImg, IPH, RowBytes);
strcpy(BWKernelName, "BWKernel2: Pre-computed values and 2D blocks");
break;
case 3:hipLaunchKernelGGL(( BWKernel3) , dim3(dimGrid2D4), dim3(ThrPerBlk) , 0, 0, GPUBWImg, GPUImg32, IPH, RowInts);
strcpy(BWKernelName, "BWKernel3: Calculates 4 pixels (3 int) at a time");
break;
default:printf("...... BW Kernel Number=%d ... NOT IMPLEMENTED .... \n", BWKN);
strcpy(BWKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2BW, 0); // Time stamp after BW image calculation
GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
switch (GaussKN){
case 1:hipLaunchKernelGGL(( GaussKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel: Everything is passed into the kernel");
break;
case 2:hipLaunchKernelGGL(( GaussKernel2) , dim3(dimGrid2D), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel2: Uses 2D blocks");
break;
case 3:hipLaunchKernelGGL(( GaussKernel3) , dim3(dimGrid2D), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel3: Stores filter coeff in constant memory");
break;
case 4:hipLaunchKernelGGL(( GaussKernel4) , dim3(dimGrid2D), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel4: Computes 1 pix/thread using Shared Memory");
break;
case 5:hipLaunchKernelGGL(( GaussKernel5) , dim3(dimGrid2D4), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel5: Computes 4 pix/thread using Shared Memory");
break;
case 6:hipLaunchKernelGGL(( GaussKernel6) , dim3(dimGrid2D), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel6: Each thread reads 5 rows of pixels into ShMem");
break;
case 7:hipLaunchKernelGGL(( GaussKernel7) , dim3(dimGrid2DG), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel7: Blocks read 5 rows, compute ThrPerBlk-4 pixels");
break;
case 8:hipLaunchKernelGGL(( GaussKernel8) , dim3(dimGrid2DG8), dim3(ThrPerBlk) , 0, 0, GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel8: Blocks read 12 vertical pixels, and compute 8");
break;
default:printf("...... Gauss Kernel Number=%d ... NOT IMPLEMENTED .... \n", GaussKN);
strcpy(GaussKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation
GPUDataTfrGauss = 2 * sizeof(double)*IMAGEPIX;
switch (SobelKN){
case 1:hipLaunchKernelGGL(( SobelKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV);
strcpy(SobelKernelName, "SobelKernel: Everything is passed into the kernel");
break;
default:printf("...... Sobel Kernel Number=%d ... NOT IMPLEMENTED .... \n", SobelKN);
strcpy(SobelKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
hipEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation
GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
switch (ThresholdKN){
case 1:hipLaunchKernelGGL(( ThresholdKernel) , dim3(NumBlocks), dim3(ThrPerBlk) , 0, 0, GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, ThreshLo, ThreshHi);
strcpy(ThresholdKernelName, "ThresholdKernel: Everything is passed into the kernel");
break;
default:printf("...... Threshold Kernel Number=%d ... NOT IMPLEMENTED .... \n",ThresholdKN);
strcpy(ThresholdKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = hipDeviceSynchronize()) != hipSuccess) goto KERNELERROR;
GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE;
hipEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = hipMemcpy(CopyImg, GPUResultImg, IMAGESIZE, hipMemcpyDeviceToHost);
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipMemcpy GPU to CPU failed!");
goto EXITCUDAERROR;
}
hipEventRecord(time4, 0);
hipEventSynchronize(time1); hipEventSynchronize(time2);
hipEventSynchronize(time2BW); hipEventSynchronize(time2Gauss); hipEventSynchronize(time2Sobel);
hipEventSynchronize(time3); hipEventSynchronize(time4);
hipEventElapsedTime(&totalTime, time1, time4);
hipEventElapsedTime(&tfrCPUtoGPU, time1, time2);
hipEventElapsedTime(&kernelExecTimeBW, time2, time2BW);
hipEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss);
hipEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel);
hipEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3);
hipEventElapsedTime(&tfrGPUtoCPU, time3, time4);
totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold;
cudaStatus = hipDeviceSynchronize();
//checkError(hipGetLastError()); // screen for errors in kernel launches
if (cudaStatus != hipSuccess) {
fprintf(stderr, "\n Program failed after hipDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
WriteBMPlin(CopyImg, OutputFileName); // Write the flipped image back to disk
printf("\n\n--------------------------------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("--------------------------------------------------------------------------------------------------\n");
printf("%s %s %s %u %d %d %d %d %d %d [Launched %u BLOCKS, %u BLOCKS/ROW]\n",
ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, BWKN, GaussKN, SobelKN, ThresholdKN, NumBlocks, BlkPerRow);
if (GaussKN == 7) {
printf(" Gauss Kernel 7: [Launched %u BLOCKS, %u BLOCKS/ROW]\n", NumBlocksG, BlkPerRowG);
}
if (GaussKN == 8) {
printf(" Gauss Kernel 8: [Launched %u BLOCKS, %u BLOCKS/ROW]\n", NumBlocksG8, BlkPerRowG);
}
printf("--------------------------------------------------------------------------------------------------\n");
printf(" CPU->GPU Transfer =%7.2f ms ... %4lu MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU));
printf(" GPU->CPU Transfer =%7.2f ms ... %4lu MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("----------------------------------------------------------------------------\n");
printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW));
printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss));
printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel));
printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold));
printf("----------------------------------------------------------------------------\n");
printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime));
printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime));
printf("----------------------------------------------------------------------------\n");
// Deallocate CPU, GPU memory and destroy events.
hipFree(GPUptr);
hipEventDestroy(time1); hipEventDestroy(time2);
hipEventDestroy(time2BW); hipEventDestroy(time2Gauss); hipEventDestroy(time2Sobel);
hipEventDestroy(time3); hipEventDestroy(time4);
// hipDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = hipDeviceReset();
if (cudaStatus != hipSuccess) {
fprintf(stderr, "hipDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
KERNELERROR:
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
EXITCUDAERROR:
hipFree(GPUptr);
EXITERROR:
free(TheImg);
free(CopyImg);
return(EXIT_FAILURE);
}
| c25a5f646913a2c633f7a12b950899f4be66c538.cu | #include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <iostream>
#include <ctype.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "cuda.h"
#define CEIL(a,b) (((a)+(b)-1)/(b))
#define SWAP(a,b,t) t=b; b=a; a=t;
#define PI 3.1415926
#define EDGE 0
#define NOEDGE 255
#define DATAMB(bytes) (bytes/1024/1024)
#define DATABW(bytes,timems) ((float)bytes/(timems * 1.024*1024.0*1024.0))
#define MAXTHGAUSSKN4 128
#define MAXTHGAUSSKN5 128
#define MAXTHGAUSSKN67 1024
#define MAXTHGAUSSKN8 256
typedef unsigned char uch;
typedef unsigned long ul;
typedef unsigned int ui;
uch *TheImg, *CopyImg; // Where images are stored in CPU
int ThreshLo=50, ThreshHi=100; // "Edge" vs. "No Edge" thresholds
// Where images and temporary results are stored in GPU
uch *GPUImg, *GPUResultImg;
double *GPUBWImg, *GPUGaussImg, *GPUGradient, *GPUTheta;
struct ImgProp{
ui Hpixels;
ui Vpixels;
uch HeaderInfo[54];
ul Hbytes;
} ip;
#define IPHB ip.Hbytes
#define IPH ip.Hpixels
#define IPV ip.Vpixels
#define IMAGESIZE (IPHB*IPV)
#define IMAGEPIX (IPH*IPV)
// Kernel that calculates a B&W image from an RGB image
// resulting image has a double type for each pixel position
__global__
void BWKernel(double *ImgBW, uch *ImgGPU, ui Hpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = MYbid / BlkPerRow;
ui MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R + G + B) / 3.0;
}
// Improved BWKernel. Uses pre-computed values and 2D blocks.
__global__
void BWKernel2(double *ImgBW, uch *ImgGPU, ui Hpixels, ui RowBytes)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
// ui MYgtid = ThrPerBlk * MYbid + MYtid;
double R, G, B;
//ui NumBlocks = gridDim.x;
// ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
// ui RowBytes = (Hpixels * 3 + 3) & (~3);
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYsrcIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
B = (double)ImgGPU[MYsrcIndex];
G = (double)ImgGPU[MYsrcIndex + 1];
R = (double)ImgGPU[MYsrcIndex + 2];
ImgBW[MYpixIndex] = (R + G + B) / 3.0;
}
// Improved BWKernel2. Calculates 4 pixels (3 int's) at a time
__global__
void BWKernel3(double *ImgBW, ui *ImgGPU32, ui Hpixels, ui RowInts)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui A, B, C;
ui Pix1, Pix2, Pix3, Pix4;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
ui MYcolIndex = MYcol*3;
if (MYcolIndex >= RowInts) return; // col out of range
ui MYoffset = MYrow * RowInts;
ui MYsrcIndex = MYoffset + MYcolIndex;
ui MYpixAddr = MYrow * Hpixels + MYcol*4;
A = ImgGPU32[MYsrcIndex]; // A=[B1,R0,G0,B0]
B = ImgGPU32[MYsrcIndex+1]; // B=[G2,B2,R1,G1]
C = ImgGPU32[MYsrcIndex+2]; // C=[R3,G3,B3,R2]
// Pix1 = R0+G0+B0;
Pix1 = (A & 0x000000FF) + ((A >> 8) & 0x000000FF) + ((A >> 16) & 0x000000FF);
// Pix2 = R1+G1+B1;
Pix2 = ((A >> 24) & 0x000000FF) + (B & 0x000000FF) + ((B >> 8) & 0x000000FF);
// Pix3 = R2+G2+B2;
Pix3 = (C & 0x000000FF) + ((B >> 16) & 0x000000FF) + ((B >> 24) & 0x000000FF);
// Pix4 = R3+G3+B3;
Pix4 = ((C >> 8) & 0x000000FF) + ((C >> 16) & 0x000000FF) + ((C >> 24) & 0x000000FF);
ImgBW[MYpixAddr] = (double)Pix1 * 0.33333333;
ImgBW[MYpixAddr + 1] = (double)Pix2 * 0.33333333;
ImgBW[MYpixAddr + 2] = (double)Pix3 * 0.33333333;
ImgBW[MYpixAddr + 3] = (double)Pix4 * 0.33333333;
}
__device__
double Gauss[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Kernel that calculates a Gauss image from the B&W image (one pixel)
// resulting image has a double type for each pixel position
__global__
void GaussKernel(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double G=0.00;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G/159.00;
}
}
// Improved GaussKernel. Uses 2D blocks. Each kernel processes a single pixel
__global__
void GaussKernel2(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G = 0.00;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * Gauss[i + 2][j + 2]);
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
__constant__
double GaussC[5][5] = { { 2, 4, 5, 4, 2 },
{ 4, 9, 12, 9, 4 },
{ 5, 12, 15, 12, 5 },
{ 4, 9, 12, 9, 4 },
{ 2, 4, 5, 4, 2 } };
// Improved GaussKernel2. Uses constant memory to store filter coefficients
__global__
void GaussKernel3(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)){
ImgGauss[MYpixIndex] = 0.0;
return;
}else{
G = 0.0;
for (i = -2; i <= 2; i++){
for (j = -2; j <= 2; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
G += (ImgBW[indx] * GaussC[i + 2][j + 2]); // use constant memory
}
}
ImgGauss[MYpixIndex] = G / 159.00;
}
}
// Improved GaussKernel3. Reads multiple (5) rows into shared memory.
// Each thread computes 1 pixel.
__global__
void GaussKernel4(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 horizontal, 5 vertical neighbors stored in Shared Memory
__shared__ double Neighbors[MAXTHGAUSSKN4][5][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
// Read from GM to Shared Memory
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
row = MYrow + i - 2;
col = MYcol + j - 2;
indx = row * Hpixels + col;
Neighbors[MYtid][i][j] = ImgBW[indx];
}
//__syncthreads();
}
__syncthreads();
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid][i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
// Improved GaussKernel3. Reads multiple (5) rows into shared memory.
// Each thread computes 4 pixels. Horizontal resolution must be a multiple of 4.
__global__
void GaussKernel5(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 8 horizontal, 5 vertical neighbors
__shared__ double Neighbors[MAXTHGAUSSKN5][5][8];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int row, col, indx, i, j, k;
double G;
ui MYrow = blockIdx.y;
ui MYcol = (MYbid*ThrPerBlk + MYtid) * 4;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow < 2) || (MYrow > Vpixels - 3)){ // Top and bottom two rows
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex+1] = 0.0;
ImgGauss[MYpixIndex+2] = 0.0;
ImgGauss[MYpixIndex+3] = 0.0;
return;
}
if (MYcol > Hpixels - 3) { // Rightmost two columns
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex + 1] = 0.0;
return;
}
if (MYcol < 2) { // Leftmost two columns
ImgGauss[MYpixIndex] = 0.0;
ImgGauss[MYpixIndex + 1] = 0.0;
return;
}
MYpixIndex += 2; // Process 2 pix. shifted
MYcol += 2;
// Read from GM to Shared Memory
for (i = 0; i < 5; i++){
for (j = 0; j < 8; j++){
row = MYrow + i - 2;
col = MYcol + j - 2;
indx = row * Hpixels + col;
Neighbors[MYtid][i][j] = ImgBW[indx];
}
}
__syncthreads();
for (k = 0; k < 4; k++){
G = 0.000;
for (i = 0; i < 5; i++){
for (j = 0; j < 5; j++){
G += (Neighbors[MYtid][i][j+k] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex+k] = G / 159.00;
}
}
// Improved GaussKernel4. Each thread computes 1 pixel.
// Each thread reads 5 pixels into Shared Memory.
// Pixels at the same column, but 5 different rows (row-2 ... row+2)
__global__
void GaussKernel6(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 vertical neighbors for each pixel that is represented by a thread
__shared__ double Neighbors[MAXTHGAUSSKN67+4][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j;
double G;
ui MYrow = blockIdx.y;
ui MYcol = MYbid*ThrPerBlk + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
ui IsEdgeThread=(MYtid==(ThrPerBlk-1));
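	// The last thread in the block is the "edge" thread: besides its own column, it also loads the 4 extra columns that its stencil needs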
// Read from GM to Shared Memory
// Each thread will read a single pixel
indx = MYpixIndex-2*Hpixels-2; // start 2 rows above & 2 columns left
if (!IsEdgeThread) {
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}
}else{
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
Neighbors[MYtid + 1][j] = ImgBW[indx + 1];
Neighbors[MYtid + 2][j] = ImgBW[indx + 2];
Neighbors[MYtid + 3][j] = ImgBW[indx + 3];
Neighbors[MYtid + 4][j] = ImgBW[indx + 4];
indx += Hpixels; // Next iteration will read next row, same column
}
}
__syncthreads();
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid+i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
// Improved GaussKernel6. Each block computes ThrPerBlk-4 pixels.
// This eliminates the need to make exceptions for the "Edge" thread
__global__
void GaussKernel7(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 5 vertical neighbors for each pixel (read by each thread)
__shared__ double Neighbors[MAXTHGAUSSKN67][5];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j;
double G;
ui MYrow = blockIdx.y;
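	// Blocks overlap by 4 columns: every thread loads one column into shared memory, but only the first ThrPerBlk-4 threads compute a pixel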
ui MYcol = MYbid*(ThrPerBlk-4) + MYtid;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<2) || (MYrow>Vpixels - 3) || (MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0;
return;
}
// Read from GM to Shared Memory.
// Each thread will read a single pixel, for 5 neighboring rows
// Each block reads ThrPerBlk pixels starting at (2 left) location
indx = MYpixIndex - 2 * Hpixels - 2; // start 2 rows above & 2 columns left
for (j = 0; j < 5; j++) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}
__syncthreads();
	if (MYtid >= ThrPerBlk - 4) return; // Each block computes only ThrPerBlk-4 pixels
G = 0.0;
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G += (Neighbors[MYtid + i][j] * GaussC[i][j]);
}
}
//__syncthreads();
ImgGauss[MYpixIndex] = G / 159.00;
}
// Improved GaussKernel7. Each block reads 12 rows.
// Each thread computes 8 vertical pixels.
__global__
void GaussKernel8(double *ImgGauss, double *ImgBW, ui Hpixels, ui Vpixels)
{
// 12 vertical neighbors are saved in the Shared Memory
// These are used to compute 8 vertical pixels by each thread
// Reads from 2 top and 2 bottom pixels are wasted.
__shared__ double Neighbors[MAXTHGAUSSKN8][12];
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
int indx, i, j, row;
double G[8] = { 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00 };
ui MYrow = blockIdx.y*8;
ui isLastBlockY = (blockIdx.y == (blockDim.y - 1));
ui MYcol = MYbid*(ThrPerBlk - 4) + MYtid;
if (MYcol >= Hpixels) return; // col out of range
if (MYrow >= Vpixels) return; // row out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYcol<2) || (MYcol>Hpixels - 3)) {
ImgGauss[MYpixIndex] = 0.0; // first and last 2 columns
return;
}
if (MYrow == 0) {
ImgGauss[MYpixIndex] = 0.0; // row0
ImgGauss[MYpixIndex+Hpixels] = 0.0; // row1
}
if (isLastBlockY) {
indx = (Vpixels - 2)*Hpixels + MYcol;
ImgGauss[indx] = 0.0; // last row-1
ImgGauss[indx + Hpixels] = 0.0; // last row
}
// Read from GM to Shared Memory.
// Each thread will read a single pixel, for 12 neighboring rows
// Each thread reads 12 pixels, but will only compute 8
indx = MYpixIndex;
for (j = 0; j < 12; j++) {
if ((MYrow+j) < Vpixels) {
Neighbors[MYtid][j] = ImgBW[indx];
indx += Hpixels; // Next iteration will read next row, same column
}else{
Neighbors[MYtid][j] = 0.00;
}
}
__syncthreads();
	if (MYtid >= ThrPerBlk - 4) return; // Each block computes only ThrPerBlk-4 pixels
for (row = 0; row < 8; row++) {
for (i = 0; i < 5; i++) {
for (j = 0; j < 5; j++) {
G[row] += (Neighbors[MYtid + i][row+j] * GaussC[i][j]);
}
}
}
// Write all computed pixels back to GM
for (j = 0; j < 8; j++) {
ImgGauss[MYpixIndex] = G[j] / 159.00;
MYpixIndex += Hpixels;
}
}
__device__
double Gx[3][3] = { { -1, 0, 1 },
{ -2, 0, 2 },
{ -1, 0, 1 } };
__device__
double Gy[3][3] = { { -1, -2, -1 },
{ 0, 0, 0 },
{ 1, 2, 1 } };
// Kernel that calculates Gradient, Theta from the Gauss image
// resulting image has a double type for each pixel position
__global__
void SobelKernel(double *ImgGrad, double *ImgTheta, double *ImgGauss, ui Hpixels, ui Vpixels)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
int row, col, indx, i, j;
double GX,GY;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
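	// Map the linear thread id back to 2D pixel coordinates: the row comes from the block index, the column from the global thread id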
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgGrad[MYpixIndex] = 0.0;
ImgTheta[MYpixIndex] = 0.0;
return;
}else{
GX = 0.0; GY = 0.0;
for (i = -1; i <= 1; i++){
for (j = -1; j <= 1; j++){
row = MYrow + i;
col = MYcol + j;
indx = row*Hpixels + col;
GX += (ImgGauss[indx] * Gx[i + 1][j + 1]);
GY += (ImgGauss[indx] * Gy[i + 1][j + 1]);
}
}
ImgGrad[MYpixIndex] = sqrt(GX*GX + GY*GY);
ImgTheta[MYpixIndex] = atan(GX / GY)*180.0 / PI;
}
}
// Kernel that calculates the threshold image from Gradient, Theta
// resulting image has an RGB value for each pixel; the same value is written to all three channels
__global__
void ThresholdKernel(uch *ImgResult, double *ImgGrad, double *ImgTheta, ui Hpixels, ui Vpixels, ui ThreshLo, ui ThreshHi)
{
ui ThrPerBlk = blockDim.x;
ui MYbid = blockIdx.x;
ui MYtid = threadIdx.x;
ui MYgtid = ThrPerBlk * MYbid + MYtid;
unsigned char PIXVAL;
double L, H, G, T;
//ui NumBlocks = gridDim.x;
ui BlkPerRow = CEIL(Hpixels, ThrPerBlk);
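	// BMP rows are padded to a multiple of 4 bytes; RowBytes is the padded row pitch of the RGB result image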
ui RowBytes = (Hpixels * 3 + 3) & (~3);
int MYrow = MYbid / BlkPerRow;
int MYcol = MYgtid - MYrow*BlkPerRow*ThrPerBlk;
if (MYcol >= Hpixels) return; // col out of range
ui MYresultIndex = MYrow * RowBytes + 3 * MYcol;
ui MYpixIndex = MYrow * Hpixels + MYcol;
if ((MYrow<1) || (MYrow>Vpixels - 2) || (MYcol<1) || (MYcol>Hpixels - 2)){
ImgResult[MYresultIndex] = NOEDGE;
ImgResult[MYresultIndex + 1] = NOEDGE;
ImgResult[MYresultIndex + 2] = NOEDGE;
return;
}else{
L = (double)ThreshLo; H = (double)ThreshHi;
G = ImgGrad[MYpixIndex];
PIXVAL = NOEDGE;
if (G <= L){ // no edge
PIXVAL = NOEDGE;
}else if (G >= H){ // edge
PIXVAL = EDGE;
}else{
T = ImgTheta[MYpixIndex];
if ((T<-67.5) || (T>67.5)){
// Look at left and right: [row][col-1] and [row][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - 1]>H) || (ImgGrad[MYpixIndex + 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -22.5) && (T <= 22.5)){
// Look at top and bottom: [row-1][col] and [row+1][col]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels]>H) || (ImgGrad[MYpixIndex + Hpixels]>H)) ? EDGE : NOEDGE;
}
else if ((T>22.5) && (T <= 67.5)){
// Look at upper right, lower left: [row-1][col+1] and [row+1][col-1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels + 1]>H) || (ImgGrad[MYpixIndex + Hpixels - 1]>H)) ? EDGE : NOEDGE;
}
else if ((T >= -67.5) && (T<-22.5)){
// Look at upper left, lower right: [row-1][col-1] and [row+1][col+1]
PIXVAL = ((ImgGrad[MYpixIndex - Hpixels - 1]>H) || (ImgGrad[MYpixIndex + Hpixels + 1]>H)) ? EDGE : NOEDGE;
}
}
ImgResult[MYresultIndex] = PIXVAL;
ImgResult[MYresultIndex + 1] = PIXVAL;
ImgResult[MYresultIndex + 2] = PIXVAL;
}
}
/*
// helper function that wraps CUDA API calls, reports any error and exits
void chkCUDAErr(cudaError_t error_id)
{
if (error_id != CUDA_SUCCESS){
printf("CUDA ERROR :::%\n", cudaGetErrorString(error_id));
exit(EXIT_FAILURE);
}
}
*/
// Read a 24-bit/pixel BMP file into a 1D linear array.
// Allocate memory to store the 1D image and return its pointer.
uch *ReadBMPlin(char* fn)
{
static uch *Img;
FILE* f = fopen(fn, "rb");
if (f == NULL){ printf("\n\n%s NOT FOUND\n\n", fn); exit(EXIT_FAILURE); }
uch HeaderInfo[54];
fread(HeaderInfo, sizeof(uch), 54, f); // read the 54-byte header
// extract image height and width from header
int width = *(int*)&HeaderInfo[18]; ip.Hpixels = width;
int height = *(int*)&HeaderInfo[22]; ip.Vpixels = height;
int RowBytes = (width * 3 + 3) & (~3); ip.Hbytes = RowBytes;
//save header for re-use
memcpy(ip.HeaderInfo, HeaderInfo,54);
printf("\n Input File name: %17s (%u x %u) File Size=%u", fn,
ip.Hpixels, ip.Vpixels, IMAGESIZE);
// allocate memory to store the main image (1 Dimensional array)
Img = (uch *)malloc(IMAGESIZE);
if (Img == NULL) return Img; // Cannot allocate memory
// read the image from disk
fread(Img, sizeof(uch), IMAGESIZE, f);
fclose(f);
return Img;
}
// Write the 1D linear-memory stored image into file.
void WriteBMPlin(uch *Img, char* fn)
{
FILE* f = fopen(fn, "wb");
if (f == NULL){ printf("\n\nFILE CREATION ERROR: %s\n\n", fn); exit(1); }
//write header
fwrite(ip.HeaderInfo, sizeof(uch), 54, f);
//write data
fwrite(Img, sizeof(uch), IMAGESIZE, f);
printf("\nOutput File name: %17s (%u x %u) File Size=%u", fn, ip.Hpixels, ip.Vpixels, IMAGESIZE);
fclose(f);
}
int main(int argc, char **argv)
{
// GPU code run times
float totalTime, totalKernelTime, tfrCPUtoGPU, tfrGPUtoCPU;
float kernelExecTimeBW, kernelExecTimeGauss, kernelExecTimeSobel, kernelExecTimeThreshold;
cudaError_t cudaStatus;
cudaEvent_t time1, time2, time2BW, time2Gauss, time2Sobel, time3, time4;
char InputFileName[255], OutputFileName[255], ProgName[255];
ui BlkPerRow, BlkPerRowG, ThrPerBlk=256, NumBlocks, NumBlocksG, NumBlocksG8;
ui GPUDataTfrBW, GPUDataTfrGauss, GPUDataTfrSobel, GPUDataTfrThresh, GPUDataTfrKernel, GPUDataTfrTotal;
ui RowBytes, RowInts;
ui *GPUImg32;
cudaDeviceProp GPUprop;
void *GPUptr; // Pointer to the bulk-allocated GPU memory
ul GPUtotalBufferSize;
ul SupportedKBlocks, SupportedMBlocks, MaxThrPerBlk; char SupportedBlocks[100];
int BWKN=1, GaussKN=1, SobelKN=1, ThresholdKN=1;
char BWKernelName[255], GaussKernelName[255], SobelKernelName[255], ThresholdKernelName[255];
strcpy(ProgName, "imedgeGCM");
switch (argc){
case 10: ThresholdKN = atoi(argv[9]);
case 9: SobelKN = atoi(argv[8]);
case 8: GaussKN = atoi(argv[7]);
case 7: BWKN = atoi(argv[6]);
case 6: ThreshHi = atoi(argv[5]);
case 5: ThreshLo = atoi(argv[4]);
case 4: ThrPerBlk = atoi(argv[3]);
case 3: strcpy(InputFileName, argv[1]);
strcpy(OutputFileName, argv[2]);
break;
default: printf("\n\nUsage: %s InputFilename OutputFilename [ThrPerBlk] [ThreshLo] [ThreshHi] [BWKernel=1-9] [GaussKernel=1-9] [SobelKernel=1-9] [ThresholdKernel=1-9]", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256", ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100",ProgName);
printf("\n\nExample: %s Astronaut.bmp Output.bmp 256 50 100 1 3 4 5", ProgName);
exit(EXIT_FAILURE);
}
if ((ThrPerBlk < 32) || (ThrPerBlk > 1024)) {
printf("Invalid ThrPerBlk option '%u'. Must be between 32 and 1024. \n", ThrPerBlk);
exit(EXIT_FAILURE);
}
if ((ThreshLo<0) || (ThreshHi>255) || (ThreshLo>ThreshHi)){
printf("\nInvalid Thresholds: Threshold must be between [0...255] ...\n");
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
if ((BWKN < 1) || (BWKN > 9) || (GaussKN < 1) || (GaussKN > 9) || (SobelKN < 1) || (SobelKN > 9) || (ThresholdKN < 1) || (ThresholdKN > 9)) {
printf("Invalid kernel number ... Kernel numbers must be between 1 and 9\n");
if ((BWKN < 1) || (BWKN > 9)) printf("BW Kernel number %d is out of range",BWKN);
		if ((GaussKN < 1) || (GaussKN > 9)) printf("Gauss Kernel number %d is out of range", GaussKN);
		if ((SobelKN < 1) || (SobelKN > 9)) printf("Sobel Kernel number %d is out of range", SobelKN);
		if ((ThresholdKN < 1) || (ThresholdKN > 9)) printf("Threshold Kernel number %d is out of range", ThresholdKN);
printf("\n\nNothing executed ... Exiting ...\n\n");
exit(EXIT_FAILURE);
}
// Handle special cases
if ((GaussKN == 4) && (ThrPerBlk>MAXTHGAUSSKN4)){
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 4 ... Set to %d.\n", MAXTHGAUSSKN4, MAXTHGAUSSKN4);
ThrPerBlk = MAXTHGAUSSKN4;
}
if ((GaussKN == 5) && (ThrPerBlk>MAXTHGAUSSKN5)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 5 ... Set to %d.\n", MAXTHGAUSSKN5, MAXTHGAUSSKN5);
ThrPerBlk = MAXTHGAUSSKN5;
}
if (( (GaussKN == 6) || (GaussKN == 7)) && (ThrPerBlk>MAXTHGAUSSKN67)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 6 or 7 ... Set to %d.\n", MAXTHGAUSSKN67, MAXTHGAUSSKN67);
ThrPerBlk = MAXTHGAUSSKN67;
}
if ((GaussKN == 8) && (ThrPerBlk>MAXTHGAUSSKN8)) {
printf("ThrPerBlk cannot be higher than %d in Gauss Kernel 8 ... Set to %d.\n", MAXTHGAUSSKN8, MAXTHGAUSSKN8);
ThrPerBlk = MAXTHGAUSSKN8;
}
// Create CPU memory to store the input and output images
TheImg = ReadBMPlin(InputFileName); // Read the input image if memory can be allocated
if (TheImg == NULL){
printf("Cannot allocate memory for the input image...\n");
exit(EXIT_FAILURE);
}
CopyImg = (uch *)malloc(IMAGESIZE);
if (CopyImg == NULL){
printf("Cannot allocate memory for the input image...\n");
free(TheImg);
exit(EXIT_FAILURE);
}
RowBytes = (IPH * 3 + 3) & (~3);
RowInts = RowBytes / 4;
BlkPerRow = CEIL(IPH, ThrPerBlk);
BlkPerRowG = CEIL(IPH, (ThrPerBlk-4));
NumBlocks = BlkPerRow * IPV;
NumBlocksG = BlkPerRowG * IPV;
NumBlocksG8 = BlkPerRowG * CEIL(IPV, 8);
dim3 dimGrid2D(BlkPerRow, ip.Vpixels);
dim3 dimGrid2D4(CEIL(BlkPerRow, 4), IPV);
dim3 dimGrid2DG(BlkPerRowG, IPV);
dim3 dimGrid2DG8(BlkPerRowG, CEIL(IPV, 8));
// Choose which GPU to run on, change this on a multi-GPU system.
int NumGPUs = 0;
cudaGetDeviceCount(&NumGPUs);
if (NumGPUs == 0){
printf("\nNo CUDA Device is available\n");
goto EXITERROR;
}
cudaStatus = cudaSetDevice(0);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?\n");
goto EXITERROR;
}
cudaGetDeviceProperties(&GPUprop, 0);
SupportedKBlocks = (ui) GPUprop.maxGridSize[0] * (ui) GPUprop.maxGridSize[1] * (ui )GPUprop.maxGridSize[2]/1024;
SupportedMBlocks = SupportedKBlocks / 1024;
sprintf(SupportedBlocks, "%u %c", (SupportedMBlocks>=5) ? SupportedMBlocks : SupportedKBlocks, (SupportedMBlocks>=5) ? 'M':'K');
MaxThrPerBlk = (ui)GPUprop.maxThreadsPerBlock;
cudaEventCreate(&time1); cudaEventCreate(&time2);
cudaEventCreate(&time2BW); cudaEventCreate(&time2Gauss); cudaEventCreate(&time2Sobel);
cudaEventCreate(&time3); cudaEventCreate(&time4);
cudaEventRecord(time1, 0); // Time stamp at the start of the GPU transfer
	// Allocate GPU buffer for the input and output images and the intermediate results
GPUtotalBufferSize = 4 * sizeof(double)*IMAGEPIX + 2 * sizeof(uch)*IMAGESIZE;
cudaStatus = cudaMalloc((void**)&GPUptr, GPUtotalBufferSize);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMalloc failed! Can't allocate GPU memory\n");
goto EXITERROR;
}
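	// Carve the single bulk allocation into sub-buffers: two uch images (input and result) followed by four double planes (BW, Gauss, Gradient, Theta)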
GPUImg = (uch *)GPUptr;
GPUImg32 = (ui *)GPUImg;
GPUResultImg = GPUImg + IMAGESIZE;
GPUBWImg = (double *)(GPUResultImg + IMAGESIZE);
GPUGaussImg = GPUBWImg + IMAGEPIX;
GPUGradient = GPUGaussImg + IMAGEPIX;
GPUTheta = GPUGradient + IMAGEPIX;
// Copy input vectors from host memory to GPU buffers.
cudaStatus = cudaMemcpy(GPUImg, TheImg, IMAGESIZE, cudaMemcpyHostToDevice);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy CPU to GPU failed!\n");
goto EXITCUDAERROR;
}
cudaEventRecord(time2, 0); // Time stamp after the CPU --> GPU tfr is done
// cudaDeviceSynchronize waits for the kernel to finish, and returns
// any errors encountered during the launch.
switch (BWKN){
case 1: BWKernel <<< NumBlocks, ThrPerBlk >>> (GPUBWImg, GPUImg, IPH);
strcpy(BWKernelName, "BWKernel: Everything is passed into the kernel");
break;
case 2: BWKernel2 <<< dimGrid2D, ThrPerBlk >>> (GPUBWImg, GPUImg, IPH, RowBytes);
strcpy(BWKernelName, "BWKernel2: Pre-computed values and 2D blocks");
break;
case 3: BWKernel3 <<< dimGrid2D4, ThrPerBlk >>> (GPUBWImg, GPUImg32, IPH, RowInts);
strcpy(BWKernelName, "BWKernel3: Calculates 4 pixels (3 int) at a time");
break;
default:printf("...... BW Kernel Number=%d ... NOT IMPLEMENTED .... \n", BWKN);
strcpy(BWKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2BW, 0); // Time stamp after BW image calculation
GPUDataTfrBW = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
switch (GaussKN){
case 1: GaussKernel <<< NumBlocks, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel: Everything is passed into the kernel");
break;
case 2: GaussKernel2 <<< dimGrid2D, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel2: Uses 2D blocks");
break;
case 3: GaussKernel3 <<< dimGrid2D, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel3: Stores filter coeff in constant memory");
break;
case 4: GaussKernel4 <<< dimGrid2D, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel4: Computes 1 pix/thread using Shared Memory");
break;
case 5: GaussKernel5 <<< dimGrid2D4, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel5: Computes 4 pix/thread using Shared Memory");
break;
case 6: GaussKernel6 <<< dimGrid2D, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel6: Each thread reads 5 rows of pixels into ShMem");
break;
case 7: GaussKernel7 <<< dimGrid2DG, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel7: Blocks read 5 rows, compute ThrPerBlk-4 pixels");
break;
		case 8: GaussKernel8 <<< dimGrid2DG8, ThrPerBlk >>> (GPUGaussImg, GPUBWImg, IPH, IPV);
strcpy(GaussKernelName, "GaussKernel8: Blocks read 12 vertical pixels, and compute 8");
break;
default:printf("...... Gauss Kernel Number=%d ... NOT IMPLEMENTED .... \n", GaussKN);
strcpy(GaussKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Gauss, 0); // Time stamp after Gauss image calculation
GPUDataTfrGauss = 2 * sizeof(double)*IMAGEPIX;
switch (SobelKN){
case 1: SobelKernel <<< NumBlocks, ThrPerBlk >>> (GPUGradient, GPUTheta, GPUGaussImg, IPH, IPV);
strcpy(SobelKernelName, "SobelKernel: Everything is passed into the kernel");
break;
default:printf("...... Sobel Kernel Number=%d ... NOT IMPLEMENTED .... \n", SobelKN);
strcpy(SobelKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
cudaEventRecord(time2Sobel, 0); // Time stamp after Gradient, Theta computation
GPUDataTfrSobel = 3 * sizeof(double)*IMAGEPIX;
switch (ThresholdKN){
case 1: ThresholdKernel <<< NumBlocks, ThrPerBlk >>> (GPUResultImg, GPUGradient, GPUTheta, IPH, IPV, ThreshLo, ThreshHi);
strcpy(ThresholdKernelName, "ThresholdKernel: Everything is passed into the kernel");
break;
default:printf("...... Threshold Kernel Number=%d ... NOT IMPLEMENTED .... \n",ThresholdKN);
strcpy(ThresholdKernelName, "*** NOT IMPLEMENTED ***");
break;
}
if ((cudaStatus = cudaDeviceSynchronize()) != cudaSuccess) goto KERNELERROR;
GPUDataTfrThresh = sizeof(double)*IMAGEPIX + sizeof(uch)*IMAGESIZE;
GPUDataTfrKernel = GPUDataTfrBW + GPUDataTfrGauss + GPUDataTfrSobel + GPUDataTfrThresh;
GPUDataTfrTotal = GPUDataTfrKernel + 2 * IMAGESIZE;
cudaEventRecord(time3, 0);
// Copy output (results) from GPU buffer to host (CPU) memory.
cudaStatus = cudaMemcpy(CopyImg, GPUResultImg, IMAGESIZE, cudaMemcpyDeviceToHost);
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaMemcpy GPU to CPU failed!");
goto EXITCUDAERROR;
}
cudaEventRecord(time4, 0);
cudaEventSynchronize(time1); cudaEventSynchronize(time2);
cudaEventSynchronize(time2BW); cudaEventSynchronize(time2Gauss); cudaEventSynchronize(time2Sobel);
cudaEventSynchronize(time3); cudaEventSynchronize(time4);
cudaEventElapsedTime(&totalTime, time1, time4);
cudaEventElapsedTime(&tfrCPUtoGPU, time1, time2);
cudaEventElapsedTime(&kernelExecTimeBW, time2, time2BW);
cudaEventElapsedTime(&kernelExecTimeGauss, time2BW, time2Gauss);
cudaEventElapsedTime(&kernelExecTimeSobel, time2Gauss, time2Sobel);
cudaEventElapsedTime(&kernelExecTimeThreshold, time2Sobel, time3);
cudaEventElapsedTime(&tfrGPUtoCPU, time3, time4);
totalKernelTime = kernelExecTimeBW + kernelExecTimeGauss + kernelExecTimeSobel + kernelExecTimeThreshold;
cudaStatus = cudaDeviceSynchronize();
//checkError(cudaGetLastError()); // screen for errors in kernel launches
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "\n Program failed after cudaDeviceSynchronize()!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
	WriteBMPlin(CopyImg, OutputFileName); // Write the edge-detected image back to disk
printf("\n\n--------------------------------------------------------------------------------------------------\n");
printf("%s ComputeCapab=%d.%d [max %s blocks; %d thr/blk] \n",
GPUprop.name, GPUprop.major, GPUprop.minor, SupportedBlocks, MaxThrPerBlk);
printf("--------------------------------------------------------------------------------------------------\n");
printf("%s %s %s %u %d %d %d %d %d %d [Launched %u BLOCKS, %u BLOCKS/ROW]\n",
ProgName, InputFileName, OutputFileName, ThrPerBlk, ThreshLo, ThreshHi, BWKN, GaussKN, SobelKN, ThresholdKN, NumBlocks, BlkPerRow);
if (GaussKN == 7) {
printf(" Gauss Kernel 7: [Launched %u BLOCKS, %u BLOCKS/ROW]\n", NumBlocksG, BlkPerRowG);
}
if (GaussKN == 8) {
printf(" Gauss Kernel 8: [Launched %u BLOCKS, %u BLOCKS/ROW]\n", NumBlocksG8, BlkPerRowG);
}
printf("--------------------------------------------------------------------------------------------------\n");
printf(" CPU->GPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrCPUtoGPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrCPUtoGPU));
printf(" GPU->CPU Transfer =%7.2f ms ... %4d MB ... %6.2f GB/s\n", tfrGPUtoCPU, DATAMB(IMAGESIZE), DATABW(IMAGESIZE, tfrGPUtoCPU));
printf("----------------------------------------------------------------------------\n");
printf(" BW Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeBW, DATAMB(GPUDataTfrBW), DATABW(GPUDataTfrBW, kernelExecTimeBW));
printf(" Gauss Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeGauss, DATAMB(GPUDataTfrGauss), DATABW(GPUDataTfrGauss, kernelExecTimeGauss));
printf(" Sobel Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeSobel, DATAMB(GPUDataTfrSobel), DATABW(GPUDataTfrSobel, kernelExecTimeSobel));
printf("Threshold Kernel Execution Time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", kernelExecTimeThreshold, DATAMB(GPUDataTfrThresh), DATABW(GPUDataTfrThresh, kernelExecTimeThreshold));
printf("----------------------------------------------------------------------------\n");
printf(" Total Kernel-only time =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalKernelTime, DATAMB(GPUDataTfrKernel), DATABW(GPUDataTfrKernel, totalKernelTime));
printf(" Total time with I/O included =%7.2f ms ... %4d MB ... %6.2f GB/s\n", totalTime, DATAMB(GPUDataTfrTotal), DATABW(GPUDataTfrTotal, totalTime));
printf("----------------------------------------------------------------------------\n");
// Deallocate CPU, GPU memory and destroy events.
cudaFree(GPUptr);
cudaEventDestroy(time1); cudaEventDestroy(time2);
cudaEventDestroy(time2BW); cudaEventDestroy(time2Gauss); cudaEventDestroy(time2Sobel);
cudaEventDestroy(time3); cudaEventDestroy(time4);
// cudaDeviceReset must be called before exiting in order for profiling and
// tracing tools such as Parallel Nsight and Visual Profiler to show complete traces.
cudaStatus = cudaDeviceReset();
if (cudaStatus != cudaSuccess) {
fprintf(stderr, "cudaDeviceReset failed!");
free(TheImg);
free(CopyImg);
exit(EXIT_FAILURE);
}
free(TheImg);
free(CopyImg);
return(EXIT_SUCCESS);
KERNELERROR:
fprintf(stderr, "\n\ncudaDeviceSynchronize returned error code %d after launching the kernel!\n", cudaStatus);
EXITCUDAERROR:
cudaFree(GPUptr);
EXITERROR:
free(TheImg);
free(CopyImg);
return(EXIT_FAILURE);
}
|
d1450f2c9094df2fd469607790ed3f94e73ea40a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void kernel_max_pred_buffer(float * max_output3, float * pred, float * FL_pred, int N_C, int n_imgs, int n_filters, int conv_out_sz){
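	// One block per output element of FL_pred: blockIdx.x encodes (cat, f3, z1, z2) and threadIdx.x indexes the image being accumulated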
int r = blockIdx.x;
int cat = r / (n_filters*conv_out_sz*conv_out_sz);
r = r % (n_filters*conv_out_sz*conv_out_sz);
int f3 = r / (conv_out_sz*conv_out_sz);
r = r % (conv_out_sz*conv_out_sz);
int z1 = r / conv_out_sz;
int z2 = r % conv_out_sz;
int img = threadIdx.x;
if(img == 0)
FL_pred[blockIdx.x] = 0;
__syncthreads();
atomicAdd(&FL_pred[blockIdx.x], max_output3[img*(n_filters*conv_out_sz*conv_out_sz) + f3*(conv_out_sz*conv_out_sz) + z1*conv_out_sz + z2] *
pred[cat*n_imgs + img]);
return;
}
// dFL === np.einsum(max_output3, range(4), pred, [4,0], [4,1,2,3])
// the gradient for FL filters [N_C x n3 x max_output_sz3 x max_output_sz3], given max_output3 [n_imgs, n3, max_output_sz3**2], and pred [N_C x n_imgs]
static PyObject *max_pred_buffer(PyObject *self, PyObject *args){
hipError_t err;
cudnnStatus_t status;
int gpu_ind, out_ind, max3_ind, pred_ind, stream_ind;
if (!PyArg_ParseTuple(args, "iiiii", &max3_ind, &pred_ind, &out_ind, &stream_ind, &gpu_ind))
return NULL;
if(pred_ind >= N_BUFFERS || pred_ind < 0 || max3_ind >= N_BUFFERS || max3_ind < 0 ||
out_ind >= N_BUFFERS || out_ind < 0){
printf("invalid buffer index\n");
return NULL;
}
if(gpu_ind < 0 || gpu_ind > N_GPUS){
printf("invalid gpu index %i\n", gpu_ind);
return NULL;
}
if(stream_ind < 0 || stream_ind > N_ALT_STREAMS){
printf("invalid stream index %i\n", stream_ind);
return NULL;
}
if(data_buffers[gpu_ind][max3_ind] == NULL || data_2d_buffers[gpu_ind][pred_ind] == NULL){
printf("one or more buffers not initialized on this gpu\n");
return NULL;
}
if(filter_flags[gpu_ind][max3_ind] == 1){
printf("one or more buffers was not initialized correctly, filters when should be tensor or vice versa\n");
return NULL;
}
hipSetDevice(gpu_ind); CHECK_CUDA_ERR
cudnnSetStream(handle, alt_streams[gpu_ind][stream_ind]);
int n_imgs_out = data_dims[0][gpu_ind][max3_ind];
int N_C = data_2d_dims[0][gpu_ind][pred_ind];
int n_filters_out = data_dims[1][gpu_ind][max3_ind];
int conv_out_sz = data_dims[2][gpu_ind][max3_ind];
//--------------------------------------
// Set and allocate output tensor descriptor
//----------------------------------------
if(data_buffers[gpu_ind][out_ind] == NULL){ // allocate output
status = cudnnCreateTensor4dDescriptor(&desc_buffers[gpu_ind][out_ind]); ERR_CHECK
status = cudnnSetTensor4dDescriptor(desc_buffers[gpu_ind][out_ind], CUDNN_TENSOR_NCHW, dataType, N_C, n_filters_out,
conv_out_sz, conv_out_sz); ERR_CHECK
err = hipMalloc((void**) &data_buffers[gpu_ind][out_ind], N_C*n_filters_out*conv_out_sz*conv_out_sz * DATA_TYPE_SZ); MALLOC_ERR_CHECK
data_dims[0][gpu_ind][out_ind] = N_C;
data_dims[1][gpu_ind][out_ind] = n_filters_out;
data_dims[2][gpu_ind][out_ind] = conv_out_sz;
data_dims[3][gpu_ind][out_ind] = conv_out_sz;
filter_flags[gpu_ind][out_ind] = 0;
//-------------------------------------------
// check to make sure inputs match the previously initialized buffer sizes
//---------------------------------------------
}else if(data_dims[0][gpu_ind][out_ind] != N_C || data_dims[1][gpu_ind][out_ind] != n_filters_out || filter_flags[gpu_ind][out_ind] == 1 ||
data_dims[2][gpu_ind][out_ind] != conv_out_sz || data_dims[3][gpu_ind][out_ind] != conv_out_sz){
printf("---------------------------\ninput dimensions do not match the initial input dimensions on the first call to this function (%i %i %i %i), (%i %i %i)\n------------------\n", data_dims[0][gpu_ind][out_ind], data_dims[1][gpu_ind][out_ind],
data_dims[2][gpu_ind][out_ind], data_dims[3][gpu_ind][out_ind], N_C, n_filters_out, conv_out_sz);
return NULL;
}
/////////////////
int thread_sz = N_C*n_filters_out*conv_out_sz*conv_out_sz;
hipLaunchKernelGGL(( kernel_max_pred_buffer) , dim3(thread_sz), dim3(n_imgs_out) , 0, 0, data_buffers[gpu_ind][max3_ind], data_2d_buffers[gpu_ind][pred_ind], data_buffers[gpu_ind][out_ind], N_C, n_imgs_out, n_filters_out, conv_out_sz);
cudnnSetStream(handle, NULL);
hipSetDevice(0); CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
| d1450f2c9094df2fd469607790ed3f94e73ea40a.cu | __global__ void kernel_max_pred_buffer(float * max_output3, float * pred, float * FL_pred, int N_C, int n_imgs, int n_filters, int conv_out_sz){
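	// One block per output element of FL_pred: blockIdx.x encodes (cat, f3, z1, z2) and threadIdx.x indexes the image being accumulated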
int r = blockIdx.x;
int cat = r / (n_filters*conv_out_sz*conv_out_sz);
r = r % (n_filters*conv_out_sz*conv_out_sz);
int f3 = r / (conv_out_sz*conv_out_sz);
r = r % (conv_out_sz*conv_out_sz);
int z1 = r / conv_out_sz;
int z2 = r % conv_out_sz;
int img = threadIdx.x;
if(img == 0)
FL_pred[blockIdx.x] = 0;
__syncthreads();
atomicAdd(&FL_pred[blockIdx.x], max_output3[img*(n_filters*conv_out_sz*conv_out_sz) + f3*(conv_out_sz*conv_out_sz) + z1*conv_out_sz + z2] *
pred[cat*n_imgs + img]);
return;
}
// dFL === np.einsum(max_output3, range(4), pred, [4,0], [4,1,2,3])
// the gradient for FL filters [N_C x n3 x max_output_sz3 x max_output_sz3], given max_output3 [n_imgs, n3, max_output_sz3**2], and pred [N_C x n_imgs]
static PyObject *max_pred_buffer(PyObject *self, PyObject *args){
cudaError_t err;
cudnnStatus_t status;
int gpu_ind, out_ind, max3_ind, pred_ind, stream_ind;
if (!PyArg_ParseTuple(args, "iiiii", &max3_ind, &pred_ind, &out_ind, &stream_ind, &gpu_ind))
return NULL;
if(pred_ind >= N_BUFFERS || pred_ind < 0 || max3_ind >= N_BUFFERS || max3_ind < 0 ||
out_ind >= N_BUFFERS || out_ind < 0){
printf("invalid buffer index\n");
return NULL;
}
if(gpu_ind < 0 || gpu_ind > N_GPUS){
printf("invalid gpu index %i\n", gpu_ind);
return NULL;
}
if(stream_ind < 0 || stream_ind > N_ALT_STREAMS){
printf("invalid stream index %i\n", stream_ind);
return NULL;
}
if(data_buffers[gpu_ind][max3_ind] == NULL || data_2d_buffers[gpu_ind][pred_ind] == NULL){
printf("one or more buffers not initialized on this gpu\n");
return NULL;
}
if(filter_flags[gpu_ind][max3_ind] == 1){
printf("one or more buffers was not initialized correctly, filters when should be tensor or vice versa\n");
return NULL;
}
cudaSetDevice(gpu_ind); CHECK_CUDA_ERR
cudnnSetStream(handle, alt_streams[gpu_ind][stream_ind]);
int n_imgs_out = data_dims[0][gpu_ind][max3_ind];
int N_C = data_2d_dims[0][gpu_ind][pred_ind];
int n_filters_out = data_dims[1][gpu_ind][max3_ind];
int conv_out_sz = data_dims[2][gpu_ind][max3_ind];
//--------------------------------------
// Set and allocate output tensor descriptor
//----------------------------------------
if(data_buffers[gpu_ind][out_ind] == NULL){ // allocate output
status = cudnnCreateTensor4dDescriptor(&desc_buffers[gpu_ind][out_ind]); ERR_CHECK
status = cudnnSetTensor4dDescriptor(desc_buffers[gpu_ind][out_ind], CUDNN_TENSOR_NCHW, dataType, N_C, n_filters_out,
conv_out_sz, conv_out_sz); ERR_CHECK
err = cudaMalloc((void**) &data_buffers[gpu_ind][out_ind], N_C*n_filters_out*conv_out_sz*conv_out_sz * DATA_TYPE_SZ); MALLOC_ERR_CHECK
data_dims[0][gpu_ind][out_ind] = N_C;
data_dims[1][gpu_ind][out_ind] = n_filters_out;
data_dims[2][gpu_ind][out_ind] = conv_out_sz;
data_dims[3][gpu_ind][out_ind] = conv_out_sz;
filter_flags[gpu_ind][out_ind] = 0;
//-------------------------------------------
// check to make sure inputs match the previously initialized buffer sizes
//---------------------------------------------
}else if(data_dims[0][gpu_ind][out_ind] != N_C || data_dims[1][gpu_ind][out_ind] != n_filters_out || filter_flags[gpu_ind][out_ind] == 1 ||
data_dims[2][gpu_ind][out_ind] != conv_out_sz || data_dims[3][gpu_ind][out_ind] != conv_out_sz){
printf("---------------------------\ninput dimensions do not match the initial input dimensions on the first call to this function (%i %i %i %i), (%i %i %i)\n------------------\n", data_dims[0][gpu_ind][out_ind], data_dims[1][gpu_ind][out_ind],
data_dims[2][gpu_ind][out_ind], data_dims[3][gpu_ind][out_ind], N_C, n_filters_out, conv_out_sz);
return NULL;
}
/////////////////
int thread_sz = N_C*n_filters_out*conv_out_sz*conv_out_sz;
kernel_max_pred_buffer <<< thread_sz, n_imgs_out >>> (data_buffers[gpu_ind][max3_ind], data_2d_buffers[gpu_ind][pred_ind], data_buffers[gpu_ind][out_ind], N_C, n_imgs_out, n_filters_out, conv_out_sz);
cudnnSetStream(handle, NULL);
cudaSetDevice(0); CHECK_CUDA_ERR
Py_INCREF(Py_None);
return Py_None;
}
|
b4eb614fc30a612cf6d0d3d8d79e569a80e94d78.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef _TMV_KERNEL_H_
#define _TMV_KERNEL_H_
#include <stdio.h>
#include "tmv.h"
#define WIDTH_A WA
#define COALESCED_NUM 32
#define globalDimY 1
#define blockDimX 256
#define blockDimY 1
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_naive(float *A, float *B, float *C, int width) {
int i;
i = 0;
float sum;
sum = 0;
for (i=0; i<width; i=i+1) {
float a;
float b;
a = A(i, idx);
b = B[i];
sum += a*b;
}
C[idx] = sum;
}
#define COALESCED_NUM 32
#define blockDimX 32
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_coalesced(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
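	// Tile the dot product in chunks of 32: each thread stages one element of B into shared memory, then the whole block reuses the tile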
for (i=0; i<width; i=(i+32))
{
int it_1;
shared_0[(tidx+0)]=B[(i+tidx)];
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#define COALESCED_NUM 32
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_opt(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=B[(i+tidx)];
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#define COALESCED_NUM 32
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
__global__ void tmv_pref(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
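	// Software pipelining: the next 32-element tile of B is prefetched into the register tmp_0 while the current tile in shared memory is consumed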
float tmp_0;
if ((tidx<32))
{
tmp_0=B[(0+tidx)];
}
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=tmp_0;
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
if ((tidx<32))
{
if ((i<(width-32)))
{
tmp_0=B[((i+32)+tidx)];
}
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#endif // #ifndef _TMV_KERNEL_H_
| b4eb614fc30a612cf6d0d3d8d79e569a80e94d78.cu |
#ifndef _TMV_KERNEL_H_
#define _TMV_KERNEL_H_
#include <stdio.h>
#include "tmv.h"
#define WIDTH_A WA
#define COALESCED_NUM 32
#define globalDimY 1
#define blockDimX 256
#define blockDimY 1
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_naive(float *A, float *B, float *C, int width) {
int i;
i = 0;
float sum;
sum = 0;
for (i=0; i<width; i=i+1) {
float a;
float b;
a = A(i, idx);
b = B[i];
sum += a*b;
}
C[idx] = sum;
}
#define COALESCED_NUM 32
#define blockDimX 32
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_coalesced(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
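	// Tile the dot product in chunks of 32: each thread stages one element of B into shared memory, then the whole block reuses the tile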
for (i=0; i<width; i=(i+32))
{
int it_1;
shared_0[(tidx+0)]=B[(i+tidx)];
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#define COALESCED_NUM 32
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
#define A(y,x) A[(y)*WIDTH_A+(x)]
__global__ void tmv_opt(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=B[(i+tidx)];
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#define COALESCED_NUM 32
#define blockDimX 512
#define blockDimY 1
#define gridDimX (gridDim.x)
#define gridDimY (gridDim.y)
#define idx (blockIdx.x*blockDimX+threadIdx.x)
#define idy (blockIdx.y*blockDimY+threadIdx.y)
#define bidy (blockIdx.y)
#define bidx (blockIdx.x)
#define tidx (threadIdx.x)
#define tidy (threadIdx.y)
#define merger_y 1
#define coalesced_idy (bidy/(COALESCED_NUM/(merger_y*blockDimY))*COALESCED_NUM)
#define globalDimY 1
__global__ void tmv_pref(float * A, float * B, float * C, int width)
{
__shared__ float shared_0[32];
int i;
float sum;
i=0;
sum=0;
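	// Software pipelining: the next 32-element tile of B is prefetched into the register tmp_0 while the current tile in shared memory is consumed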
float tmp_0;
if ((tidx<32))
{
tmp_0=B[(0+tidx)];
}
for (i=0; i<width; i=(i+32))
{
int it_1;
if ((tidx<32))
{
shared_0[(tidx+0)]=tmp_0;
}
__syncthreads();
#pragma unroll
for (it_1=0; it_1<32; it_1=(it_1+1))
{
float a;
float b;
a=A((it_1+i), idx);
b=shared_0[it_1];
sum+=(a*b);
}
if ((tidx<32))
{
if ((i<(width-32)))
{
tmp_0=B[((i+32)+tidx)];
}
}
__syncthreads();
}
{
C[idx]=sum;
}
}
#endif // #ifndef _TMV_KERNEL_H_
|
e909f6ecdb37b323f14cc7a4b15ef20e10f0d950.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#include <iostream>
#include <stdlib.h>
#define cimg_use_jpeg
#include "CImg.h"
#define THREADS 128
static void CudaTest(const char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
static __global__ void blur(unsigned char *d_rout, unsigned char *d_gout, unsigned char *d_bout, unsigned char *d_rO, unsigned char *d_gO, unsigned char *d_bO, const int H, const int W) {
int index= (threadIdx.x) + blockIdx.x * blockDim.x;
int r,c;
//compute the blur
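	// average of the 8 surrounding pixels (the center pixel itself is not included)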
if (index < (H) * (W)) {
r = index / W;
c = index % W;
//red
d_rout[index] = (d_rO[(r+1) * W + c] + d_rO[(r-1) * W + c] +
d_rO[r * W + (c+1)] + d_rO[(r+1) * W + (c+1)] + d_rO[(r-1) * W + (c+1)] +
d_rO[r * W + (c-1)] + d_rO[(r+1) * W + (c-1)] + d_rO[(r-1) * W + (c-1)]) / 8;
//green
d_gout[index] = (d_gO[(r+1) * W + c] + d_gO[(r-1) * W + c] +
d_gO[r * W + (c+1)] + d_gO[(r+1) * W + (c+1)] + d_gO[(r-1) * W + (c+1)] +
d_gO[r * W + (c-1)] + d_gO[(r+1) * W + (c-1)] + d_gO[(r-1) * W + (c-1)]) / 8;
//blue
d_bout[index] = (d_bO[(r+1) * W + c] + d_bO[(r-1) * W + c] +
d_bO[r * W +(c+1)] + d_bO[(r+1) * W + (c+1)] + d_bO[(r-1) * W + (c+1)] +
d_bO[r * W + (c-1)] + d_bO[(r+1) * W + (c-1)] + d_bO[(r-1) * W + (c-1)]) / 8;
}
}
int main(int argc, char *argv[]) {
struct timeval start, end;
double runtime = 0.0, total = 0.0;
if (argc != 3) {
fprintf(stderr, "usage: exe, input file, number of groups\n"); exit(-1);
}
// import image from jpg file
cimg_library::CImg<unsigned char> input_img(argv[1]);
const int NUM_PART = atoi(argv[2]);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
printf("Running on %s\n", prop.name);
//create Height/Width variables for readability
const int H = input_img.height();
const int W = input_img.width();
const int Hout = H + 2;
const int Wout = W + 2;
//create height/offset for partitions
int offsetH = H / NUM_PART; //height for each group, not padded
//create GPU variables
unsigned char *d_rO, *d_gO, *d_bO, *d_rout, *d_gout, *d_bout;
unsigned char *rO, *gO, *bO, *rout, *gout, *bout;
//allocate CPU arrays
if(hipSuccess != hipHostMalloc(&rO, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
if(hipSuccess != hipHostMalloc(&gO, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
if(hipSuccess != hipHostMalloc(&bO, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
if(hipSuccess != hipHostMalloc(&rout, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
if(hipSuccess != hipHostMalloc(&gout, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
if(hipSuccess != hipHostMalloc(&bout, Wout * Hout * sizeof(unsigned char), hipHostMallocDefault)) fprintf(stderr, "Error allocating\n");
memset(rO, 0, Wout * Hout * sizeof(unsigned char));
memset(gO, 0, Wout * Hout * sizeof(unsigned char));
memset(bO, 0, Wout * Hout * sizeof(unsigned char));
//allocate GPU memory for arrays
hipMalloc((void**)&d_rO, Wout * (offsetH+2) * sizeof(unsigned char));
hipMalloc((void**)&d_gO, Wout * (offsetH+2) * sizeof(unsigned char));
hipMalloc((void**)&d_bO, Wout * (offsetH+2) * sizeof(unsigned char));
hipMalloc((void**)&d_rout, Wout * (offsetH+2) * sizeof(unsigned char));
hipMalloc((void**)&d_gout, Wout * (offsetH+2) * sizeof(unsigned char));
hipMalloc((void**)&d_bout, Wout * (offsetH+2) * sizeof(unsigned char));
//create cuda streams
hipStream_t streams[NUM_PART];
for(int i = 0; i < NUM_PART; i++) {hipStreamCreate(&streams[i]);}
//pad the image
for(int c = 0; c< W; c++) {
for(int r = 0; r < H; r++) {
rO[(r+1) * W + (c+1) ] = input_img(c, r, 0);
gO[(r+1) * W + (c+1) ] = input_img(c, r, 1);
bO[(r+1) * W + (c+1) ] = input_img(c, r, 2);
}
}
//create new image
cimg_library::CImg<unsigned char> output_img(W, H, 1, 3);
//loop over number of groups, calculate portion of blur for each
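	// each partition copies its strip of offsetH+2 rows (strip plus halo rows) and is processed on its own stream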
for(int i = 0; i < NUM_PART; i++) {
//send over padded image info to GPU
if(hipSuccess != hipMemcpyAsync(d_rO, &rO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), hipMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
if(hipSuccess != hipMemcpyAsync(d_gO, &gO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), hipMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
if(hipSuccess != hipMemcpyAsync(d_bO, &bO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), hipMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
//launch kernel
gettimeofday(&start, NULL);
hipLaunchKernelGGL(( blur), dim3(((offsetH+1) * (W+1) + THREADS - 1) / THREADS), dim3(THREADS), 0, streams[i], d_rout, d_gout, d_bout, d_rO, d_gO, d_bO, (offsetH+1), (W+1));
hipDeviceSynchronize();
gettimeofday(&end, NULL);
runtime = end.tv_sec + (end.tv_usec / 1000000.0) - start.tv_sec - (start.tv_usec / 1000000.0);
total+= runtime;
//send the blurred image info back to CPU
if(hipSuccess != hipMemcpyAsync(&rout[((i * (offsetH)) * Wout)], d_rout, W * (offsetH) * sizeof(unsigned char), hipMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
if(hipSuccess != hipMemcpyAsync(&gout[((i * (offsetH)) * Wout)], d_gout, W * (offsetH) * sizeof(unsigned char), hipMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
if(hipSuccess != hipMemcpyAsync(&bout[((i * (offsetH)) * Wout)], d_bout, W * (offsetH) * sizeof(unsigned char), hipMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
}
printf("\nCompute time for Blur: %.8f s\n", runtime);
for(int c = 0; c < W; c++) {
for(int r = 0; r < H; r++) {
output_img(c, r, 0) = rout[(r)* W + (c)];
output_img(c, r, 1) = gout[(r)* W + (c)];
output_img(c, r, 2) = bout[(r)* W + (c)];
}
}
for(int i = 0; i < NUM_PART; i ++) {
hipStreamSynchronize(streams[i]);
hipStreamDestroy(streams[i]);
}
//save output to file - commented out for experiment runs
//output_img.save_jpeg("output.jpg");
hipFree(d_rO); hipFree(d_gO); hipFree(d_bO);
hipFree(d_rout); hipFree(d_gout); hipFree(d_bout);
hipHostFree(rO); hipHostFree(gO); hipHostFree(bO); hipHostFree(rout); hipHostFree(gout); hipHostFree(bout);
return 0;
}
| e909f6ecdb37b323f14cc7a4b15ef20e10f0d950.cu | #include <cstdio>
#include <iostream>
#include <stdlib.h>
#define cimg_use_jpeg
#include "CImg.h"
#define THREADS 128
static void CudaTest(const char *msg)
{
cudaError_t e;
cudaDeviceSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
static __global__ void blur(unsigned char *d_rout, unsigned char *d_gout, unsigned char *d_bout, unsigned char *d_rO, unsigned char *d_gO, unsigned char *d_bO, const int H, const int W) {
int index= (threadIdx.x) + blockIdx.x * blockDim.x;
int r,c;
//compute the blur
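	// average of the 8 surrounding pixels (the center pixel itself is not included)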
if (index < (H) * (W)) {
r = index / W;
c = index % W;
//red
d_rout[index] = (d_rO[(r+1) * W + c] + d_rO[(r-1) * W + c] +
d_rO[r * W + (c+1)] + d_rO[(r+1) * W + (c+1)] + d_rO[(r-1) * W + (c+1)] +
d_rO[r * W + (c-1)] + d_rO[(r+1) * W + (c-1)] + d_rO[(r-1) * W + (c-1)]) / 8;
//green
d_gout[index] = (d_gO[(r+1) * W + c] + d_gO[(r-1) * W + c] +
d_gO[r * W + (c+1)] + d_gO[(r+1) * W + (c+1)] + d_gO[(r-1) * W + (c+1)] +
d_gO[r * W + (c-1)] + d_gO[(r+1) * W + (c-1)] + d_gO[(r-1) * W + (c-1)]) / 8;
//blue
d_bout[index] = (d_bO[(r+1) * W + c] + d_bO[(r-1) * W + c] +
d_bO[r * W +(c+1)] + d_bO[(r+1) * W + (c+1)] + d_bO[(r-1) * W + (c+1)] +
d_bO[r * W + (c-1)] + d_bO[(r+1) * W + (c-1)] + d_bO[(r-1) * W + (c-1)]) / 8;
}
}
int main(int argc, char *argv[]) {
struct timeval start, end;
double runtime = 0.0, total = 0.0;
if (argc != 3) {
fprintf(stderr, "usage: exe, input file, number of groups\n"); exit(-1);
}
// import image from jpg file
cimg_library::CImg<unsigned char> input_img(argv[1]);
const int NUM_PART = atoi(argv[2]);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
printf("Running on %s\n", prop.name);
//create Height/Width variables for readability
const int H = input_img.height();
const int W = input_img.width();
const int Hout = H + 2;
const int Wout = W + 2;
//create height/offset for partitions
int offsetH = H / NUM_PART; //height for each group, not padded
//create GPU variables
unsigned char *d_rO, *d_gO, *d_bO, *d_rout, *d_gout, *d_bout;
unsigned char *rO, *gO, *bO, *rout, *gout, *bout;
//allocate CPU arrays
if(cudaSuccess != cudaHostAlloc(&rO, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
if(cudaSuccess != cudaHostAlloc(&gO, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
if(cudaSuccess != cudaHostAlloc(&bO, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
if(cudaSuccess != cudaHostAlloc(&rout, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
if(cudaSuccess != cudaHostAlloc(&gout, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
if(cudaSuccess != cudaHostAlloc(&bout, Wout * Hout * sizeof(unsigned char), cudaHostAllocDefault)) fprintf(stderr, "Error allocating\n");
memset(rO, 0, Wout * Hout * sizeof(unsigned char));
memset(gO, 0, Wout * Hout * sizeof(unsigned char));
memset(bO, 0, Wout * Hout * sizeof(unsigned char));
//allocate GPU memory for arrays
cudaMalloc((void**)&d_rO, Wout * (offsetH+2) * sizeof(unsigned char));
cudaMalloc((void**)&d_gO, Wout * (offsetH+2) * sizeof(unsigned char));
cudaMalloc((void**)&d_bO, Wout * (offsetH+2) * sizeof(unsigned char));
cudaMalloc((void**)&d_rout, Wout * (offsetH+2) * sizeof(unsigned char));
cudaMalloc((void**)&d_gout, Wout * (offsetH+2) * sizeof(unsigned char));
cudaMalloc((void**)&d_bout, Wout * (offsetH+2) * sizeof(unsigned char));
//create cuda streams
cudaStream_t streams[NUM_PART];
for(int i = 0; i < NUM_PART; i++) {cudaStreamCreate(&streams[i]);}
//pad the image
for(int c = 0; c< W; c++) {
for(int r = 0; r < H; r++) {
rO[(r+1) * W + (c+1) ] = input_img(c, r, 0);
gO[(r+1) * W + (c+1) ] = input_img(c, r, 1);
bO[(r+1) * W + (c+1) ] = input_img(c, r, 2);
}
}
//create new image
cimg_library::CImg<unsigned char> output_img(W, H, 1, 3);
//loop over number of groups, calculate portion of blur for each
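	// each partition copies its strip of offsetH+2 rows (strip plus halo rows) and is processed on its own stream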
for(int i = 0; i < NUM_PART; i++) {
//send over padded image info to GPU
if(cudaSuccess != cudaMemcpyAsync(d_rO, &rO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
if(cudaSuccess != cudaMemcpyAsync(d_gO, &gO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
if(cudaSuccess != cudaMemcpyAsync(d_bO, &bO[(i * (offsetH)) * Wout], Wout * (offsetH+2) * sizeof(unsigned char), cudaMemcpyHostToDevice, streams[i])) fprintf(stderr, "copy to device failed\n");
//launch kernel
gettimeofday(&start, NULL);
blur<<<((offsetH+1) * (W+1) + THREADS - 1) / THREADS, THREADS, 0, streams[i]>>>(d_rout, d_gout, d_bout, d_rO, d_gO, d_bO, (offsetH+1), (W+1));
cudaDeviceSynchronize();
gettimeofday(&end, NULL);
runtime = end.tv_sec + (end.tv_usec / 1000000.0) - start.tv_sec - (start.tv_usec / 1000000.0);
total+= runtime;
//send the blurred image info back to CPU
if(cudaSuccess != cudaMemcpyAsync(&rout[((i * (offsetH)) * Wout)], d_rout, W * (offsetH) * sizeof(unsigned char), cudaMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
if(cudaSuccess != cudaMemcpyAsync(&gout[((i * (offsetH)) * Wout)], d_gout, W * (offsetH) * sizeof(unsigned char), cudaMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
if(cudaSuccess != cudaMemcpyAsync(&bout[((i * (offsetH)) * Wout)], d_bout, W * (offsetH) * sizeof(unsigned char), cudaMemcpyDeviceToHost, streams[i])) fprintf(stderr, "copy to host failed\n");
}
printf("\nCompute time for Blur: %.8f s\n", runtime);
for(int c = 0; c < W; c++) {
for(int r = 0; r < H; r++) {
output_img(c, r, 0) = rout[(r)* W + (c)];
output_img(c, r, 1) = gout[(r)* W + (c)];
output_img(c, r, 2) = bout[(r)* W + (c)];
}
}
for(int i = 0; i < NUM_PART; i ++) {
cudaStreamSynchronize(streams[i]);
cudaStreamDestroy(streams[i]);
}
//save output to file - commented out for experiment runs
//output_img.save_jpeg("output.jpg");
cudaFree(d_rO); cudaFree(d_gO); cudaFree(d_bO);
cudaFree(d_rout); cudaFree(d_gout); cudaFree(d_bout);
cudaFreeHost(rO); cudaFreeHost(gO); cudaFreeHost(bO); cudaFreeHost(rout); cudaFreeHost(gout); cudaFreeHost(bout);
return 0;
}
|
3f5ed829c1f0a499b4023681ec8e743ae69c2bcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
using namespace std;
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include <chrono>
using namespace std::chrono;
#define RND (hiprand_uniform(&local_rand_state))
#define seed 1000
__device__ vec3 color(const ray& r, hitable **world, hiprandState_t *local_rand_state) {
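    // Iterative version of the usual recursive color() function: follow the ray for up to 50 bounces, accumulating attenuation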
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0);
}
__global__ void rand_init(hiprandState_t *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
        hiprand_init(seed, 0, 0, rand_state); // create a seed in thread 0 that guarantees the same seed for all threads
}
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y))
return;
int pixel_index = j*max_x + i;
    hiprand_init(seed, pixel_index, 0, &rand_state[pixel_index]); // hand the same rand_state to the threads of the launched blocks to guarantee consistency.
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y))
        return; // make sure we do not run beyond the image size defined for the kernel
    int pixel_index = j*max_x + i; // compute this thread's pixel position in the image
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
    fb[pixel_index] = col; // store the result in fb so the host can access it at the end of the code
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, hiprandState_t *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
hiprandState_t local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(hiprand_uniform(&local_rand_state)*hiprand_uniform(&local_rand_state), RND*RND, RND*RND)));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22*22+1+3);
vec3 lookfrom(25,12,13);
vec3 lookat(0,0,0);
        float dist_to_focus = 10.0; // (lookfrom-lookat).length() is not used; the focus distance is hardcoded
float aperture = 0.1;
*d_camera = new camera(lookfrom,
lookat,
vec3(0,1,0),
30.0,
float(nx)/float(ny),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { //deletes the space allocated for each sphere
for(int i=0; i < 22*22+1+3; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main() {
ofstream myfile;
myfile.open ("tempo.txt");
int num_testes = 30;
int prop;
for(int k = 1;k<num_testes;k++) {
prop = k;
int nx = (int) 1200/prop;
int ny = (int) 800/prop;
int ns = 10;
int tx = 8;
int ty = 8;
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3); //size of the fb buffer: one vec3 per pixel
vec3 *fb;
hipMallocManaged((void **)&fb, fb_size); //allocates a buffer with one entry per image pixel. Managed memory "shares" the same address between CPU and GPU.
hiprandState_t *d_rand_state;
hipMalloc((void **)&d_rand_state, num_pixels*sizeof(hiprandState_t));
hiprandState_t *d_rand_state2;
hipMalloc((void **)&d_rand_state2, 1*sizeof(hiprandState_t));
hipLaunchKernelGGL(( rand_init), dim3(1),dim3(1), 0, 0, d_rand_state2); // launches the kernel that creates the seed on block 0, thread 0
//hipDeviceSynchronize(); - used only for debugging; not needed because kernels are asynchronous with the host but run in order with respect to each other
hitable **d_list;
int num_hitables = 489; //must be greater than or equal to 488, which is the number of spheres created :)
hipMalloc((void **)&d_list, num_hitables*sizeof(hitable *)); //allocates the hitables
hitable **d_world;
hipMalloc((void **)&d_world, sizeof(hitable *));
camera **d_camera;
hipMalloc((void **)&d_camera, sizeof(camera *));
hipLaunchKernelGGL(( create_world), dim3(1),dim3(1), 0, 0, d_list, d_world, d_camera, nx, ny, d_rand_state2); //creates the random world using the initial state produced by rand_init on block 0, thread 0
clock_t start, stop;
start = clock();
dim3 blocks(nx/tx+1,ny/ty+1); //defines the number of blocks (tx and ty are multiples of 8 since the layout is 8x8 threads, so each block does a similar amount of work)
dim3 threads(tx,ty);
hipLaunchKernelGGL(( render_init), dim3(blocks), dim3(threads), 0, 0, nx, ny, d_rand_state); //initializes the per-pixel RNG states with a blocks x threads launch.
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, fb, nx, ny, ns, d_camera, d_world, d_rand_state); // renders the image with the chosen block/thread configuration, using the same scene for all threads (most of the work happens here)
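// note: kernel launches are asynchronous, so without a hipDeviceSynchronize() before stop the timer below mostly
// measures launch overhead rather than the render itself (the explicit sync only happens later, for k==2)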
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
myfile << "Tamanho da Imagens x Tempo de Execuo: ";
myfile << "\n";
myfile << "Tamanho da Imagem: "<< nx <<" x " << ny << " - Tempo de Execuo: " << timer_seconds << "," << "\n"; //escreve tempo de execucao e tamanho da imagem da imagem rodada.
// Como estamos realizando diversos testes de tamanhos de imagem diferente, desejamos que apenas uma imagem seja criada para podermos analisar a qualidade
if(k==2){ //devolve apenas os pixels do tamanho de prop==2
hipDeviceSynchronize(); //garante que processamento j acabou para acessar dados de fb
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r()); //reads the red channel from fb
int ig = int(255.99*fb[pixel_index].g()); // reads the green channel from fb
int ib = int(255.99*fb[pixel_index].b());//reads the blue channel from fb
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
// freeing the allocated memory
hipLaunchKernelGGL(( free_world), dim3(1),dim3(1), 0, 0, d_list,d_world,d_camera);
hipFree(d_list);
hipFree(d_rand_state);
hipFree(fb);
hipFree(d_camera);
hipFree(d_world);
hipDeviceReset();
}
myfile.close(); //closes the output file
}
| 3f5ed829c1f0a499b4023681ec8e743ae69c2bcd.cu | #include <iostream>
#include <fstream>
using namespace std;
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
#include "material.h"
#include <chrono>
using namespace std::chrono;
#define RND (curand_uniform(&local_rand_state))
#define seed 1000
__device__ vec3 color(const ray& r, hitable **world, curandState *local_rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, local_rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0);
}
__global__ void rand_init(curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curand_init(seed, 0, 0, rand_state); //cria uma seed na thread 0 que vai garantir mesma seed para todas as threads
}
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y))
return;
int pixel_index = j*max_x + i;
curand_init(seed, pixel_index, 0, &rand_state[pixel_index]); //repassa o mesmo rand_state para os threads dos blocos utilizados na compilacao para garantir consistencia.
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= max_x) || (j >= max_y))
return; // garante que nao vai rodar alem dos tamanho definido no kernel
int pixel_index = j*max_x + i; // calcula a posicao do pixel no kernel
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0,0,0);
for(int s=0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v, &local_rand_state);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col; //coloca o resultado em fb para ser acessado do host ao final do código
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny, curandState *rand_state) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
curandState local_rand_state = *rand_state;
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = RND;
vec3 center(a+RND,0.2,b+RND);
if(choose_mat < 0.8f) {
d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(curand_uniform(&local_rand_state)*curand_uniform(&local_rand_state), RND*RND, RND*RND)));
}
else if(choose_mat < 0.95f) {
d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+RND), 0.5f*(1.0f+RND), 0.5f*(1.0f+RND)), 0.5f*RND));
}
else {
d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
*rand_state = local_rand_state;
*d_world = new hitable_list(d_list, 22*22+1+3);
vec3 lookfrom(25,12,13);
vec3 lookat(0,0,0);
float dist_to_focus = 10.0; (lookfrom-lookat).length();
float aperture = 0.1;
*d_camera = new camera(lookfrom,
lookat,
vec3(0,1,0),
30.0,
float(nx)/float(ny),
aperture,
dist_to_focus);
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) { //deleta espaco alocado para cada esfera
for(int i=0; i < 22*22+1+3; i++) {
delete ((sphere *)d_list[i])->mat_ptr;
delete d_list[i];
}
delete *d_world;
delete *d_camera;
}
int main() {
ofstream myfile;
myfile.open ("tempo.txt");
int num_testes = 30;
int prop;
for(int k = 1;k<num_testes;k++) {
prop = k;
int nx = (int) 1200/prop;
int ny = (int) 800/prop;
int ns = 10;
int tx = 8;
int ty = 8;
int num_pixels = nx*ny;
size_t fb_size = num_pixels*sizeof(vec3); //aloca tamanho do vetor fb para cada pixel caber um vec3
vec3 *fb;
cudaMallocManaged((void **)&fb, fb_size); //aloca lista do tamanho do numero de pixels da imagem. Cudamallocmanage "copia" o mesmo endereco de memoria para CPU e GPU.
curandState *d_rand_state;
cudaMalloc((void **)&d_rand_state, num_pixels*sizeof(curandState));
curandState *d_rand_state2;
cudaMalloc((void **)&d_rand_state2, 1*sizeof(curandState));
rand_init<<<1,1>>>(d_rand_state2); // inicializa kernel que cria seed no bloco 0 thread 0
//cudaDeviceSynchronize(); - Utilizado apenas para debug, não é necessário dado que threads são assincrona entre si, mas sequenciais entre elas
hitable **d_list;
int num_hitables = 489; //tem que ser maior ou igual a 488 que é o numero de bolinhas criadas :)
cudaMalloc((void **)&d_list, num_hitables*sizeof(hitable *)); //aloca hitables
hitable **d_world;
cudaMalloc((void **)&d_world, sizeof(hitable *));
camera **d_camera;
cudaMalloc((void **)&d_camera, sizeof(camera *));
create_world<<<1,1>>>(d_list, d_world, d_camera, nx, ny, d_rand_state2); //cria mundo randomico utilizando o estado incial criado por rand_init no bloco 0 thread 0
clock_t start, stop;
start = clock();
dim3 blocks(nx/tx+1,ny/ty+1); //define o numero de blocos (tx e ty são multiplos de 8 já que a arquitetura de 8x8 threads, garantindo que cada bloco faca um numero pareceido de processamento)
dim3 threads(tx,ty);
render_init<<<blocks, threads>>>(nx, ny, d_rand_state); //cria o kernel de tamanho block x threads.
render<<<blocks, threads>>>(fb, nx, ny, ns, d_camera, d_world, d_rand_state); // renderiza a imagem no tamanho do bloco e threads estabelicidos, garantindo o mesmo cenário para todas as threads (maior parte do processamento está aqui)
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
myfile << "Tamanho da Imagens x Tempo de Execução: ";
myfile << "\n";
myfile << "Tamanho da Imagem: "<< nx <<" x " << ny << " - Tempo de Execução: " << timer_seconds << "," << "\n"; //escreve tempo de execucao e tamanho da imagem da imagem rodada.
// Como estamos realizando diversos testes de tamanhos de imagem diferente, desejamos que apenas uma imagem seja criada para podermos analisar a qualidade
if(k==2){ //devolve apenas os pixels do tamanho de prop==2
cudaDeviceSynchronize(); //garante que processamento já acabou para acessar dados de fb
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r()); //pega cor vermelha de fb
int ig = int(255.99*fb[pixel_index].g()); // pega cor verde de fb
int ib = int(255.99*fb[pixel_index].b());//pega cor azul de fb
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
}
// limpando a memoria alocada
free_world<<<1,1>>>(d_list,d_world,d_camera);
cudaFree(d_list);
cudaFree(d_rand_state);
cudaFree(fb);
cudaFree(d_camera);
cudaFree(d_world);
cudaDeviceReset();
}
myfile.close(); //fecha arquivo de escrita
}
|
7e9b6c654add86f54d7e07b4e722de235601e296.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <Environment.h>
#include <loops/transform_same.h>
#include <types/types.h>
#include <op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
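// Thin __global__ wrapper: it only forwards its arguments to TransformSame<X>::transformCuda for the concrete OpType.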
template <typename X, typename OpType>
__global__ void transformSameSimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformSame<X>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer, tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X>
_CUDA_H void TransformSame<X>::executeTransformShaped(dim3 launchDims, hipStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_SAME_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X>
template <typename OpType>
__device__ void TransformSame<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = static_cast<X*>(vx);
auto z = static_cast<X*>(vz);
auto params = static_cast<X*>(vparams);
auto reductionPointer = static_cast<X*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
} else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
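// Fast path: if both buffers have a positive element-wise stride and the same ordering, a plain strided
// grid-stride loop is enough; otherwise every element offset is resolved through the full shape info.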
if(xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
auto zOffset = shape::getIndexOffset(i, zShapeInfo, length);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X>
template <typename OpType>
_CUDA_H void TransformSame<X>::intermediateShaped(dim3 launchDims, hipStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
hipLaunchKernelGGL(( transformSameSimple<X, OpType>), dim3(launchDims.x), dim3(launchDims.x), launchDims.z, *stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "transformSame(...) failed");
}
template<typename X>
void TransformSame<X>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
}
template<typename X>
template <typename OpType>
void TransformSame<X>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT TransformSame, , LIBND4J_TYPES);
}
}
| 7e9b6c654add86f54d7e07b4e722de235601e296.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author [email protected]
//
#include <Environment.h>
#include <loops/transform_same.h>
#include <types/types.h>
#include <op_boilerplate.h>
#include <loops/legacy_ops.h>
#include <helpers/DebugHelper.h>
using namespace simdOps;
template <typename X, typename OpType>
__global__ void transformSameSimple(void *x, Nd4jLong *xShapeInfo, int xRank,
void *params,
void *z, Nd4jLong *zShapeInfo, int zRank,
int *allocationPointer,
void *reductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
functions::transform::TransformSame<X>::template transformCuda<OpType>(x,xShapeInfo,params,z,zShapeInfo,allocationPointer,reductionPointer, tadShapeInfo, tadOffsets);
}
namespace functions {
namespace transform {
template<typename X>
_CUDA_H void TransformSame<X>::executeTransformShaped(dim3 launchDims, cudaStream_t *stream, int opNum, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
DISPATCH_BY_OPNUM_T(intermediateShaped, PARAMS(launchDims, stream, x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets), TRANSFORM_SAME_OPS);
DEBUG_KERNEL(stream, opNum);
}
template<typename X>
template <typename OpType>
__device__ void TransformSame<X>::transformCuda(void *vx, Nd4jLong *xShapeInfo,
void *vparams,
void *vz, Nd4jLong *zShapeInfo,
int *allocationPointer, void *vreductionPointer,
Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
auto x = static_cast<X*>(vx);
auto z = static_cast<X*>(vz);
auto params = static_cast<X*>(vparams);
auto reductionPointer = static_cast<X*>(vreductionPointer);
if(OpType::requiresSpecial) {
OpType::execSpecialCuda(x,xShapeInfo,z,zShapeInfo,params, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
return;
} else {
__shared__ Nd4jLong xEws;
__shared__ Nd4jLong zEws;
__shared__ char xOrder;
__shared__ char zOrder;
__shared__ Nd4jLong length;
if (threadIdx.x == 0) {
xEws = shape::elementWiseStride(xShapeInfo);
zEws = shape::elementWiseStride(zShapeInfo);
xOrder = shape::order(xShapeInfo);
zOrder = shape::order(zShapeInfo);
length = shape::length(xShapeInfo);
}
__syncthreads();
auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int totalThreads = gridDim.x * blockDim.x;
if(xEws > 0 && zEws > 0 && xOrder == zOrder) {
for (int i = tid; i < length; i += totalThreads)
z[i * zEws] = OpType::op(x[i * xEws], params);
}
else {
if(vx == vz) {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
z[xOffset] = OpType::op(x[xOffset], params);
}
}
else {
for (Nd4jLong i = tid; i < length; i+= totalThreads) {
auto xOffset = shape::getIndexOffset(i, xShapeInfo, length);
auto zOffset = shape::getIndexOffset(i, zShapeInfo, length);
z[zOffset] = OpType::op(x[xOffset], params);
}
}
}
}
};
template<typename X>
template <typename OpType>
_CUDA_H void TransformSame<X>::intermediateShaped(dim3 launchDims, cudaStream_t *stream, void *x, Nd4jLong *xShape, int xRank, void *extraParams, void *z, Nd4jLong *zShape, int zRank, int *allocationPointer, void *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
transformSameSimple<X, OpType><<<launchDims.x, launchDims.x, launchDims.z, *stream>>>(x, xShape, xRank, extraParams, z, zShape, zRank, allocationPointer, reductionPointer, tadShapeInfo, tadOffsets);
nd4j::DebugHelper::checkErrorCode(stream, "transformSame(...) failed");
}
template<typename X>
void TransformSame<X>::exec(int opNum, void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
}
template<typename X>
template <typename OpType>
void TransformSame<X>::exec(void *dx, Nd4jLong *xShapeInfo, void *result, Nd4jLong *resultShapeInfo, void *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {
}
BUILD_SINGLE_TEMPLATE(template class ND4J_EXPORT TransformSame, , LIBND4J_TYPES);
}
}
|
aef447e110573aa67a6ae08b2e049993b7d0a1d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#define MAX_RANGE_DIFF 255
//Computes a Gaussian value. Important: it expects x squared and sigma as its arguments.
__device__ float gauss(float x_square, float sigma)
{
return expf(- x_square / (2 * sigma * sigma));
}
//The spatial kernel is precomputed so that no exponential has to be evaluated inside each step of the per-pixel summation,
//because that is an expensive operation. It is simpler to compute the possible values once, store them in a matrix (i.e. an array),
//load that array into shared memory and read the values from there.
//The spatialKernel array holds the Gaussian values for every possible spatial offset.
//r: the radius of the spatial kernel (i.e. the largest spatial offset between two pixels that is still taken into account).
//sigma: the sigma of the spatial Gaussian.
__global__ void createSpatialKernel(float *spatialKernel, int r, float sigma)
{
int n = 2 * r + 1; //side length of the kernel
int i = blockIdx.x - r; //column index within the spatial kernel
int j = blockIdx.y - r; //row index within the spatial kernel
float x_square = (float)(i * i + j * j);
spatialKernel[blockIdx.x + n * blockIdx.y] = gauss(x_square, sigma);
}
//The intensity difference of two pixels can take 255*2+1 = 511 values (the smallest is 0-255 = -255, the largest 255 - 0 = 255),
//so it is worth precomputing the Gaussian values for these as well, because evaluating G(I_i - I_j) (the Gaussian of the intensity
//difference) for every pixel pair is expensive, and 511 is not that large a number. This mirrors the spatial kernel above.
//The array holding the Gaussian values for the possible intensity differences is called rangeKernel (not a precise name).
//The maximum absolute value of the intensity difference is MAX_RANGE_DIFF.
__global__ void createRangeKernel(float *rangeKernel, float sigma)
{
//first compute the Gaussian only for the non-negative delta I values, because the function is symmetric
int tid = threadIdx.x;
if (tid >= MAX_RANGE_DIFF) {
int deltaI = tid - MAX_RANGE_DIFF;
rangeKernel[tid] = gauss((float)(deltaI * deltaI), sigma);
}
__syncthreads();
//copy over the Gaussian values belonging to the negative intensity differences
int last = MAX_RANGE_DIFF * 2; //=510
if (tid < MAX_RANGE_DIFF) {
rangeKernel[tid] = rangeKernel[last - tid];
}
}
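// A minimal launch sketch (an assumption, not part of the original file): createSpatialKernel is written for a
// (2r+1) x (2r+1) grid of single-thread blocks, and createRangeKernel for one block of 2*MAX_RANGE_DIFF+1 = 511 threads, e.g.
// hipLaunchKernelGGL(createSpatialKernel, dim3(2*r+1, 2*r+1), dim3(1), 0, 0, d_spatialKernel, r, sigma_spatial);
// hipLaunchKernelGGL(createRangeKernel, dim3(1), dim3(2*MAX_RANGE_DIFF+1), 0, 0, d_rangeKernel, sigma_range);
// where d_spatialKernel, d_rangeKernel, sigma_spatial and sigma_range are hypothetical device buffers/parameters.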
//The GPU kernel that implements the bilateral filter.
//First two arguments: the arrays holding the pixel intensities of the input and output images.
//spatialKernel, rangeKernel: arrays storing the Gaussian values for the possible spatial and intensity differences.
//They are read many times, so they are copied into shared memory.
//r: radius of the spatial kernel; width, height: width and height of the image, in pixels.
__global__ void bilateralFilter(unsigned char *in, unsigned char *out, float *spatialKernel, float *rangeKernel, int r,
int width, int height)
{
int n = 2 * r + 1; //side length of the spatial kernel
int spatialKernelSize = n * n;
extern __shared__ float sharedData[]; //shared memory holds both the spatial kernel and the rangeKernel values, stored contiguously one after the other
float *pSpatialKernel = &sharedData[r * n + r]; //pointer to the centre of the shared-memory region holding the spatial kernel
float *pRangeKernel = &sharedData[spatialKernelSize + MAX_RANGE_DIFF]; //pointer to the centre of the shared-memory region holding the range kernel
//Filling the shared memory:
//1. every thread copies its share of the spatialKernel elements
int index = threadIdx.x + blockDim.x * threadIdx.y;
int step = blockDim.x * blockDim.y; //total number of threads in the block
while (index < spatialKernelSize) {
sharedData[index] = spatialKernel[index];
index += step;
}
//2. every thread copies its share of the rangeKernel elements
index = threadIdx.x + blockDim.x * threadIdx.y;
int rangeKernelSize = 2 * MAX_RANGE_DIFF + 1; //=511
while (index < rangeKernelSize) {
sharedData[index + spatialKernelSize] = rangeKernel[index];
index += step;
}
__syncthreads();
//shared memory is filled, now the actual work:
int x = threadIdx.x + blockIdx.x * blockDim.x; //compute the pixel coordinates
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) { //only process valid pixels
int offset = x + y * width; //index of the pixel's intensity in the in and out arrays
float summa = 0.0f, weightSumma = 0.0f;
int intensity = in[offset]; //intensity of this pixel
for (int j = -r; j <= r; ++j) { //j: row index
int yj = y + j; //y coordinate of the pixel currently examined
for (int i = -r; i <= r; ++i) { //i: column index
int xi = x + i; //x coordinate of the pixel currently examined
if (xi >= 0 && xi < width && yj >= 0 && yj < height) {
int offsetij = xi + yj * width; //index of the (xi, yj) pixel's intensity
int intensityij = in[offsetij]; //intensity of the (xi, yj) pixel
int deltaI = intensityij - intensity; //difference of the intensities
float temp = pSpatialKernel[i + j * n] * pRangeKernel[deltaI];
weightSumma += temp;
summa += temp * intensityij;
}
}
}
out[offset] = (weightSumma == 0.0f) ? 0 : ((unsigned char)(summa / weightSumma)); //TODO: round instead of truncating
}
}
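// Hedged usage sketch (assumed host code, not from the original source): the filter needs ((2r+1)^2 + 511) floats
// of dynamic shared memory and one thread per pixel, e.g.
// size_t shmem = ((2*r+1)*(2*r+1) + 2*MAX_RANGE_DIFF + 1) * sizeof(float);
// hipLaunchKernelGGL(bilateralFilter, dim3((width+15)/16, (height+15)/16), dim3(16, 16), shmem, 0,
//                    d_in, d_out, d_spatialKernel, d_rangeKernel, r, width, height);
// d_in/d_out are hypothetical device image buffers; any block shape works as long as shmem is sized as above.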
| aef447e110573aa67a6ae08b2e049993b7d0a1d4.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "math.h"
#define MAX_RANGE_DIFF 255
//Gauss függvényt számít. Fontos: x négyzetét kell átadni neki, meg a sigma-t.
__device__ float gauss(float x_square, float sigma)
{
return expf(- x_square / (2 * sigma * sigma));
}
//A térbeli kernelt előre kiszámítom, hogy ne kelljen egy adott pixel esetén a szummázás egy adott lépésében exponenciális függvényt
//számítani, mert ez drága művlet. Egyszerűbb az, ha a lehetséges értékeket kiszámítjuk, ezt betesszük egy mátrixba (illetve egy tömbbe)
//ezt a tömböt betötjük a shared memóriába is innen szedjük majd elő az értékeket.
//a spatialKernel tömb tartalmazza a lehetséges térbeli eltérésekhez tartozó Gauss függvény értékeket.
//r: a térbeli kernel sugara (vagyis két pixel közötti legnagyobb térbeli eltérés, amit figyelembe veszünk, r).
//sigma: a spatial Gaussian-hoz tartotó sigma.
__global__ void createSpatialKernel(float *spatialKernel, int r, float sigma)
{
int n = 2 * r + 1; //a kernel oldalának hossza
int i = blockIdx.x - r; //oszlop index a spatial kernelben
int j = blockIdx.y - r; //sor index a spatial kernelben
float x_square = (float)(i * i + j * j);
spatialKernel[blockIdx.x + n * blockIdx.y] = gauss(x_square, sigma);
}
//Két pixel intenzitásának különbsége 255*2+1 = 511 féle érték lehet (a legkisebb 0-255 = -255, a legnagyobb 255 - 0 = 255)
//érdemes az ezekhez tartozó Gauss értékeket is kiszámítani, mert adott két pixelhez tartozó G(I_i - I_j) (az inenztás különbséghez tartozó Gauss)
//kiszámítása költséges művelet, 511 pedig nem olyan nagy szám. Ez hasonló az előző spatial kernelhez.
//a lehetséges intenzitás különbségekhez tartozó Gauss értékeket tároló tömböt rangeKernel-nek nevezem (nem precíz).
//az intenzitás különbség abszolút értékének maximuma MAX_RANGE_DIFF
__global__ void createRangeKernel(float *rangeKernel, float sigma)
{
//először csak a pozitív delte I -khez tartozó Gausst számítjuk ki, mert szimmetrikus a függvény
int tid = threadIdx.x;
if (tid >= MAX_RANGE_DIFF) {
int deltaI = tid - MAX_RANGE_DIFF;
rangeKernel[tid] = gauss((float)(deltaI * deltaI), sigma);
}
__syncthreads();
//átmásoljuk a negatív intenzitás különbség értkekhez tartozó Gauss értékeket
int last = MAX_RANGE_DIFF * 2; //=510
if (tid < MAX_RANGE_DIFF) {
rangeKernel[tid] = rangeKernel[last - tid];
}
}
//A bilaterel filtert megvalósító cuda kernel.
//eső két argumentum: a bemenő is kimenő kép pixeleinek intenzitás értékeit tartalmazó tömbök
//spatialKernel, rangeKernel: lehetséges a térbeli és intenzitásbeli különbségekhez tartozó Gauss értékeket tároló tömbök.
//Ezekből sokszor olvasunk, ezért ezeket a shared memóriába másoljuk.
//r: a spatial kernel sugara ; width, height: a kép szélessége és magasság, pixelben.
__global__ void bilateralFilter(unsigned char *in, unsigned char *out, float *spatialKernel, float *rangeKernel, int r,
int width, int height)
{
int n = 2 * r + 1; //a spatial kernel oldalának hossza
int spatialKernelSize = n * n;
extern __shared__ float sharedData[]; //A shared memory tárolja a spatial kernel és a rangeKernel értékeit is, egymás után folytonosan
float *pSpatialKernel = &sharedData[r * n + r]; //a shared memory spatial kernelt tároló részének közepére mutató pointer
float *pRangeKernel = &sharedData[spatialKernelSize + MAX_RANGE_DIFF]; //a shared memory range kernelt tároló részének közepére mutat
//A shared memory feltöltése:
//1. minden thread átmásolja a megfelelő spatialKernel elemet
int index = threadIdx.x + blockDim.x * threadIdx.y;
int step = blockDim.x * blockDim.y; //az összes thread száma a blockban
while (index < spatialKernelSize) {
sharedData[index] = spatialKernel[index];
index += step;
}
//2. minden thread átmásolja a megfelelő rangeKernel elemet
index = threadIdx.x + blockDim.x * threadIdx.y;
int rangeKernelSize = 2 * MAX_RANGE_DIFF + 1; //=511
while (index < rangeKernelSize) {
sharedData[index + spatialKernelSize] = rangeKernel[index];
index += step;
}
__syncthreads();
//megvagyunk a shared memory feltöltésével, jöhet a lényeg:
int x = threadIdx.x + blockIdx.x * blockDim.x; //pixel koordináták kiszámítása
int y = threadIdx.y + blockIdx.y * blockDim.y;
if (x < width && y < height) { //csak az érvényes pixeleket nézzük
int offset = x + y * width; //a pixel intenzitását tároló memória indexe az in és out tömbökben
float summa = 0.0f, weightSumma = 0.0f;
int intensity = in[offset]; //az adott pixel intenzitása
for (int j = -r; j <= r; ++j) { //j: sorindex
int yj = y + j; //az aktuálisan vizsgált pixel y koordinátája
for (int i = -r; i <= r; ++i) { //i: oszlopindex
int xi = x + i; //az aktuálisan vizsgált pixel x koordinátája
if (xi >= 0 && xi < width && yj >= 0 && yj < height) {
int offsetij = xi + yj * width; //az xi , yj pixel intenzitását tároló memória indexe
int intensityij = in[offsetij]; //az xi, yj pixel intenzitása
int deltaI = intensityij - intensity; //az intenzitások különbsége
float temp = pSpatialKernel[i + j * n] * pRangeKernel[deltaI];
weightSumma += temp;
summa += temp * intensityij;
}
}
}
out[offset] = (weightSumma == 0.0f) ? 0 : ((unsigned char)(summa / weightSumma)); //TODO: inkább kerekítsen, mint levágjon
}
}
|
c6d3c19b17dd9a1899be2d00335723e9b68ec153.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <errno.h>
#include "error.h"
#include "cudaerror.h"
void ecudaMalloc(void** devptr, size_t size) {
errno = 0;
hipError_t res = hipMalloc(devptr, size);
if (res != hipSuccess) {
print_error_msg("call to hipMalloc failed: %s", hipGetErrorString(res));
exit(-1);
}
return ;
}
void ecudaMemcpy(void* dst, const void* src, size_t count, enum hipMemcpyKind kind) {
errno = 0;
hipError_t res = hipMemcpy(dst, src, count, kind);
if (res != hipSuccess) {
print_error_msg("call to hipMemcpy failed: %s", hipGetErrorString(res));
exit(-1);
}
return ;
}
void ecudaFree(void* devptr) {
errno = 0;
hipError_t res = hipFree(devptr);
if (res != hipSuccess) {
print_error_msg("call to hipMemcpy failed: %s", hipGetErrorString(res));
exit(-1);
}
return ;
}
| c6d3c19b17dd9a1899be2d00335723e9b68ec153.cu | #include <stdlib.h>
#include <errno.h>
#include "error.h"
#include "cudaerror.h"
void ecudaMalloc(void** devptr, size_t size) {
errno = 0;
cudaError_t res = cudaMalloc(devptr, size);
if (res != cudaSuccess) {
print_error_msg("call to cudaMalloc failed: %s", cudaGetErrorString(res));
exit(-1);
}
return ;
}
void ecudaMemcpy(void* dst, const void* src, size_t count, enum cudaMemcpyKind kind) {
errno = 0;
cudaError_t res = cudaMemcpy(dst, src, count, kind);
if (res != cudaSuccess) {
print_error_msg("call to cudaMemcpy failed: %s", cudaGetErrorString(res));
exit(-1);
}
return ;
}
void ecudaFree(void* devptr) {
errno = 0;
cudaError_t res = cudaFree(devptr);
if (res != cudaSuccess) {
print_error_msg("call to cudaMemcpy failed: %s", cudaGetErrorString(res));
exit(-1);
}
return ;
}
|
80a7623000c6f647f5d15a092da9a13e8945a3e8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifdef USE_ROCM
#include "dragon/core/context_cuda.h"
#include "dragon/utils/conversions.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
#if __CUDA_ARCH__ >= 350
#define LDG(x, i) __ldg(x + i)
#define LDG2(x, i) convert::To<AccT>(__ldg(x + i))
#else
#define LDG(x, i) x[i]
#define LDG2(x, i) convert::To<AccT>(x[i])
#endif
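// The reduction kernels below compute per-slice mean and variance with a block-wide sum: _RowwiseMoments reduces
// over rows for each column, _ColwiseMoments over columns for each row, and _GenericMoments handles arbitrary
// reduction axes through transposed dims/strides.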
template <typename T, typename AccT>
__global__ void _RowwiseMoments(
const int rows,
const int cols,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(rows);
CUDA_2D_KERNEL_LOOP1(i, cols) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, rows) {
const int xi = j * cols + i;
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template <typename T, typename AccT>
__global__ void _ColwiseMoments(
const int rows,
const int cols,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(cols);
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, cols) {
const int xi = i * cols + j;
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template <typename T, typename AccT, int D>
__global__ void _GenericMoments(
const int rows,
const int cols,
const int num_dims,
const SimpleArray<int, D> x_dims,
const SimpleArray<int, D> x_strides,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(cols);
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, cols) {
int xi = 0, c = i * cols + j;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(x_dims.data[d], c, &c, &r);
xi += r * x_strides.data[d];
}
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
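// Host-side dispatcher: if the reduction collapses to a row-wise or column-wise case the matching specialized
// kernel is launched; otherwise the dims/strides are transposed and the generic kernel is used.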
template <typename T, typename AccT>
void _Moments(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* x,
AccT* mean,
AccT* var,
CUDAContext* ctx) {
int rows, cols;
vec32_t out_dims(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
out_dims[axes[i]] = 1;
}
if (math::utils::IsRowwiseReduce(
num_dims, dims, out_dims.data(), &rows, &cols)) {
hipLaunchKernelGGL(( _RowwiseMoments),
dim3(CUDA_2D_BLOCKS(cols)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(), rows, cols, x, mean, var);
return;
}
if (math::utils::IsColwiseReduce(
num_dims, dims, out_dims.data(), &rows, &cols)) {
hipLaunchKernelGGL(( _ColwiseMoments),
dim3(CUDA_2D_BLOCKS(rows)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(), rows, cols, x, mean, var);
return;
}
CUDA_TENSOR_DIMS_CHECK(num_dims);
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_axes;
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_strides;
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_dims;
math::utils::TransposeAxesForReduce(
num_dims, num_axes, axes, transpose_axes.data);
math::utils::ComputeTransposeStrides(
num_dims, dims, transpose_axes.data, transpose_strides.data);
rows = cols = 1;
const int pivot = num_dims - num_axes;
for (int i = 0; i < pivot; ++i) {
rows *= dims[transpose_axes.data[i]];
}
for (int i = pivot; i < num_dims; ++i) {
cols *= dims[transpose_axes.data[i]];
}
for (int i = 0; i < num_dims; ++i) {
transpose_dims.data[i] = dims[transpose_axes.data[i]];
}
hipLaunchKernelGGL(( _GenericMoments),
dim3(CUDA_2D_BLOCKS(rows)),
dim3(CUDA_THREADS),
0,
ctx->cuda_stream(),
rows, cols, num_dims, transpose_dims, transpose_strides, x, mean, var);
}
#undef LDG
#undef LDG2
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T, AccT) \
template <> \
void Moments<T, AccT, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* x, \
AccT* mean, \
AccT* var, \
CUDAContext* ctx) { \
_Moments( \
num_dims, \
dims, \
num_axes, \
axes, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mean, \
var, \
ctx); \
}
DEFINE_KERNEL_LAUNCHER(int8_t, float);
DEFINE_KERNEL_LAUNCHER(uint8_t, float);
DEFINE_KERNEL_LAUNCHER(int, float);
DEFINE_KERNEL_LAUNCHER(int64_t, double);
DEFINE_KERNEL_LAUNCHER(float16, float);
DEFINE_KERNEL_LAUNCHER(float, float);
DEFINE_KERNEL_LAUNCHER(double, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_ROCM
| 80a7623000c6f647f5d15a092da9a13e8945a3e8.cu | #ifdef USE_CUDA
#include "dragon/core/context_cuda.h"
#include "dragon/utils/conversions.h"
#include "dragon/utils/device/common_cub.h"
#include "dragon/utils/math_functions.h"
#include "dragon/utils/op_kernels.h"
namespace dragon {
namespace kernel {
namespace {
#if __CUDA_ARCH__ >= 350
#define LDG(x, i) __ldg(x + i)
#define LDG2(x, i) convert::To<AccT>(__ldg(x + i))
#else
#define LDG(x, i) x[i]
#define LDG2(x, i) convert::To<AccT>(x[i])
#endif
template <typename T, typename AccT>
__global__ void _RowwiseMoments(
const int rows,
const int cols,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(rows);
CUDA_2D_KERNEL_LOOP1(i, cols) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, rows) {
const int xi = j * cols + i;
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template <typename T, typename AccT>
__global__ void _ColwiseMoments(
const int rows,
const int cols,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(cols);
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, cols) {
const int xi = i * cols + j;
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template <typename T, typename AccT, int D>
__global__ void _GenericMoments(
const int rows,
const int cols,
const int num_dims,
const SimpleArray<int, D> x_dims,
const SimpleArray<int, D> x_strides,
const T* x,
AccT* mean,
AccT* var) {
__shared__ typename BlockReduce<AccT>::TempStorage m_storage;
__shared__ typename BlockReduce<AccT>::TempStorage v_storage;
const AccT scale = AccT(1) / AccT(cols);
CUDA_2D_KERNEL_LOOP1(i, rows) {
AccT m_val = AccT(0), v_val = AccT(0);
CUDA_2D_KERNEL_LOOP2(j, cols) {
int xi = 0, c = i * cols + j;
for (int d = num_dims - 1; d >= 0; --d) {
int r;
FIXED_DIVISOR_DIV_MOD(x_dims.data[d], c, &c, &r);
xi += r * x_strides.data[d];
}
m_val += LDG2(x, xi);
v_val += math::utils::Square(LDG2(x, xi));
}
m_val = BlockReduce<AccT>(m_storage).Sum(m_val);
v_val = BlockReduce<AccT>(v_storage).Sum(v_val);
if (threadIdx.x == 0) {
const AccT mu = m_val * scale;
mean[i] = mu;
var[i] = v_val * scale - mu * mu;
}
}
}
template <typename T, typename AccT>
void _Moments(
const int num_dims,
const int* dims,
const int num_axes,
const int* axes,
const T* x,
AccT* mean,
AccT* var,
CUDAContext* ctx) {
int rows, cols;
vec32_t out_dims(dims, dims + num_dims);
for (int i = 0; i < num_axes; ++i) {
out_dims[axes[i]] = 1;
}
if (math::utils::IsRowwiseReduce(
num_dims, dims, out_dims.data(), &rows, &cols)) {
_RowwiseMoments<<<
CUDA_2D_BLOCKS(cols),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(rows, cols, x, mean, var);
return;
}
if (math::utils::IsColwiseReduce(
num_dims, dims, out_dims.data(), &rows, &cols)) {
_ColwiseMoments<<<
CUDA_2D_BLOCKS(rows),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(rows, cols, x, mean, var);
return;
}
CUDA_TENSOR_DIMS_CHECK(num_dims);
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_axes;
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_strides;
SimpleArray<int, CUDA_TENSOR_MAX_DIMS> transpose_dims;
math::utils::TransposeAxesForReduce(
num_dims, num_axes, axes, transpose_axes.data);
math::utils::ComputeTransposeStrides(
num_dims, dims, transpose_axes.data, transpose_strides.data);
rows = cols = 1;
const int pivot = num_dims - num_axes;
for (int i = 0; i < pivot; ++i) {
rows *= dims[transpose_axes.data[i]];
}
for (int i = pivot; i < num_dims; ++i) {
cols *= dims[transpose_axes.data[i]];
}
for (int i = 0; i < num_dims; ++i) {
transpose_dims.data[i] = dims[transpose_axes.data[i]];
}
_GenericMoments<<<
CUDA_2D_BLOCKS(rows),
CUDA_THREADS,
0,
ctx->cuda_stream()>>>(
rows, cols, num_dims, transpose_dims, transpose_strides, x, mean, var);
}
#undef LDG
#undef LDG2
} // namespace
/* ------------------- Launcher Separator ------------------- */
#define DEFINE_KERNEL_LAUNCHER(T, AccT) \
template <> \
void Moments<T, AccT, CUDAContext>( \
const int num_dims, \
const int* dims, \
const int num_axes, \
const int* axes, \
const T* x, \
AccT* mean, \
AccT* var, \
CUDAContext* ctx) { \
_Moments( \
num_dims, \
dims, \
num_axes, \
axes, \
reinterpret_cast<const math::ScalarType<T>::type*>(x), \
mean, \
var, \
ctx); \
}
DEFINE_KERNEL_LAUNCHER(int8_t, float);
DEFINE_KERNEL_LAUNCHER(uint8_t, float);
DEFINE_KERNEL_LAUNCHER(int, float);
DEFINE_KERNEL_LAUNCHER(int64_t, double);
DEFINE_KERNEL_LAUNCHER(float16, float);
DEFINE_KERNEL_LAUNCHER(float, float);
DEFINE_KERNEL_LAUNCHER(double, double);
#undef DEFINE_KERNEL_LAUNCHER
} // namespace kernel
} // namespace dragon
#endif // USE_CUDA
|
ea3b72ca8415d389ab405a98845eaf9fc378db9a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stack>
#include "paddle/fluid/operators/math/tree2col.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
using Node = paddle::operators::math::TreeNode;
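// tree2col: one thread per (patch, feature) pair; it accumulates the eta_l / eta_r / eta_t weighted sums of the
// node feature vectors listed for that patch and writes the three results contiguously into the output row.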
template <typename T>
__global__ void tree2col(const T* eta, const int* node, const int* index,
const T* vectors, T* result, int feature_size, int n) {
const int thread_id =
(blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
const int patch_id = thread_id / feature_size;
const int j = thread_id % feature_size;
if (patch_id < n) {
const int begin_o = patch_id * 3 * feature_size;
const int begin = index[patch_id * 2], end = index[patch_id * 2 + 1];
T res_l = 0, res_r = 0, res_t = 0;
for (int i = begin; i < end; i++) {
const int id = node[i];
const T vec = vectors[id * feature_size + j];
res_l += eta[i * 3] * vec;
res_r += eta[i * 3 + 1] * vec;
res_t += eta[i * 3 + 2] * vec;
}
result[begin_o + j * 3] = res_l;
result[begin_o + j * 3 + 1] = res_r;
result[begin_o + j * 3 + 2] = res_t;
}
}
template <typename T>
class Tree2ColFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const paddle::platform::CUDADeviceContext& context,
const framework::Tensor& EdgeSet,
const framework::Tensor& node_features,
framework::Tensor* patch, int max_depth) {
std::vector<std::vector<int>> tr;
auto gpu_place = context.GetPlace();
auto cpu_place = platform::CPUPlace();
auto stream = context.stream();
auto feature_dims = node_features.dims();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant;
Tensor EdgeSet_cpu;
framework::TensorCopy(EdgeSet, cpu_place, &EdgeSet_cpu);
int64_t feature_size = feature_dims[1];
size_t patch_elem_size = 3 * static_cast<size_t>(feature_size);
size_t node_count = 0, patch_count = 0, total_size = 0;
size_t max_size = feature_dims[0];
Tree2ColUtil::construct_tree(EdgeSet_cpu, &tr, &node_count);
std::vector<std::vector<Node>> processing_list;
for (size_t u = 1; u <= node_count; u++) {
std::vector<Node> tmp = Tree2ColUtil::construct_patch(u, max_depth, tr);
if (!tmp.empty()) {
processing_list.push_back(tmp);
total_size += tmp.size();
}
}
size_t patch_size = processing_list.size();
Tensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
int* node = node_cpu.mutable_data<int>({static_cast<int64_t>(total_size)},
cpu_place);
T* eta = eta_cpu.mutable_data<T>({static_cast<int64_t>(total_size * 3)},
cpu_place);
int* index = index_cpu.mutable_data<int>(
{static_cast<int64_t>(patch_size * 2)}, cpu_place);
int idx = 0, index_idx = 0;
for (auto& tmp : processing_list) {
index[index_idx++] = idx;
for (auto& v : tmp) {
node[idx] = static_cast<int>(v.node - 1);
eta[idx * 3] = v.eta_l<T>(max_depth);
eta[idx * 3 + 1] = v.eta_r<T>(max_depth);
eta[idx * 3 + 2] = v.eta_t<T>(max_depth);
idx++;
}
index[index_idx++] = idx;
}
framework::TensorCopy(node_cpu, gpu_place, context, &node_gpu);
framework::TensorCopy(eta_cpu, gpu_place, context, &eta_gpu);
framework::TensorCopy(index_cpu, gpu_place, context, &index_gpu);
int elem_size = patch_size * feature_size;
int blocks = (elem_size + 1024 - 1) / 1024;
int block_x = 512;
int block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
patch->mutable_data<T>(
{static_cast<int64_t>(max_size), static_cast<int64_t>(patch_elem_size)},
gpu_place);
constant(context, patch, 0);
hipLaunchKernelGGL(( tree2col<T>), dim3(grid), dim3(threads), 0, stream,
eta_gpu.data<T>(), node_gpu.data<int>(), index_gpu.data<int>(),
node_features.data<T>(), patch->data<T>(), feature_size, patch_size);
}
};
template <typename T>
class Col2TreeFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& EdgeSet,
const framework::Tensor& patch_grad,
framework::Tensor* embedding_grad, int max_depth) {
std::vector<std::vector<int>> tr;
auto gpu_place = context.GetPlace();
auto cpu_place = platform::CPUPlace();
auto stream = context.stream();
auto output_dims = patch_grad.dims();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant;
Tensor EdgeSet_cpu;
framework::TensorCopy(EdgeSet, cpu_place, &EdgeSet_cpu);
int64_t output_size = output_dims[1];
size_t patch_elem_size = 3 * static_cast<size_t>(output_size);
size_t node_count = 0, patch_count = 0;
size_t max_size = output_dims[0];
Tree2ColUtil::construct_tree(EdgeSet_cpu, &tr, &node_count);
std::vector<std::vector<Node>> processing_list;
std::vector<std::vector<Node>> grad_list;
grad_list.resize(node_count);
size_t total_size = 0, grad_size = node_count;
for (size_t u = 1; u <= node_count; u++) {
std::vector<Node> tmp = Tree2ColUtil::construct_patch(u, max_depth, tr);
if (!tmp.empty()) {
processing_list.push_back(tmp);
}
}
for (size_t patch_id = 0; patch_id < processing_list.size(); patch_id++) {
for (auto v : processing_list[patch_id]) {
grad_list[v.get_node() - 1].push_back(v.change_node(patch_id + 1));
}
}
for (auto& tmp : grad_list) {
total_size += tmp.size();
}
Tensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
int* node = node_cpu.mutable_data<int>({static_cast<int64_t>(total_size)},
cpu_place);
T* eta = eta_cpu.mutable_data<T>({static_cast<int64_t>(total_size * 3)},
cpu_place);
int* index = index_cpu.mutable_data<int>(
{static_cast<int64_t>(grad_size * 2)}, cpu_place);
size_t idx = 0, index_idx = 0;
for (auto& tmp : grad_list) {
index[index_idx++] = idx;
for (auto& v : tmp) {
node[idx] = static_cast<int>(v.node - 1);
eta[idx * 3] = v.eta_l<T>(max_depth);
eta[idx * 3 + 1] = v.eta_r<T>(max_depth);
eta[idx * 3 + 2] = v.eta_t<T>(max_depth);
idx++;
}
index[index_idx++] = idx;
}
framework::TensorCopy(node_cpu, gpu_place, &node_gpu);
framework::TensorCopy(eta_cpu, gpu_place, &eta_gpu);
framework::TensorCopy(index_cpu, gpu_place, &index_gpu);
int elem_size = output_size * grad_size;
int blocks = (elem_size + 1024 - 1) / 1024;
int block_x = 512;
int block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
embedding_grad->mutable_data<T>(
{static_cast<int64_t>(max_size), static_cast<int64_t>(patch_elem_size)},
gpu_place);
constant(context, embedding_grad, 0);
hipLaunchKernelGGL(( tree2col<T>), dim3(grid), dim3(threads), 0, stream,
eta_gpu.data<T>(), node_gpu.data<int>(), index_gpu.data<int>(),
patch_grad.data<T>(), embedding_grad->data<T>(), output_size,
grad_size);
}
};
template class Tree2ColFunctor<platform::CUDADeviceContext, float>;
template class Tree2ColFunctor<platform::CUDADeviceContext, double>;
template class Col2TreeFunctor<platform::CUDADeviceContext, float>;
template class Col2TreeFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
| ea3b72ca8415d389ab405a98845eaf9fc378db9a.cu | // Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <stack>
#include "paddle/fluid/operators/math/tree2col.h"
#include "paddle/phi/kernels/funcs/math_function.h"
namespace paddle {
namespace operators {
namespace math {
using Tensor = framework::Tensor;
using Node = paddle::operators::math::TreeNode;
template <typename T>
__global__ void tree2col(const T* eta, const int* node, const int* index,
const T* vectors, T* result, int feature_size, int n) {
const int thread_id =
(blockIdx.x * gridDim.y + blockIdx.y) * blockDim.x + threadIdx.x;
const int patch_id = thread_id / feature_size;
const int j = thread_id % feature_size;
if (patch_id < n) {
const int begin_o = patch_id * 3 * feature_size;
const int begin = index[patch_id * 2], end = index[patch_id * 2 + 1];
T res_l = 0, res_r = 0, res_t = 0;
for (int i = begin; i < end; i++) {
const int id = node[i];
const T vec = vectors[id * feature_size + j];
res_l += eta[i * 3] * vec;
res_r += eta[i * 3 + 1] * vec;
res_t += eta[i * 3 + 2] * vec;
}
result[begin_o + j * 3] = res_l;
result[begin_o + j * 3 + 1] = res_r;
result[begin_o + j * 3 + 2] = res_t;
}
}
template <typename T>
class Tree2ColFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const paddle::platform::CUDADeviceContext& context,
const framework::Tensor& EdgeSet,
const framework::Tensor& node_features,
framework::Tensor* patch, int max_depth) {
std::vector<std::vector<int>> tr;
auto gpu_place = context.GetPlace();
auto cpu_place = platform::CPUPlace();
auto stream = context.stream();
auto feature_dims = node_features.dims();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant;
Tensor EdgeSet_cpu;
framework::TensorCopy(EdgeSet, cpu_place, &EdgeSet_cpu);
int64_t feature_size = feature_dims[1];
size_t patch_elem_size = 3 * static_cast<size_t>(feature_size);
size_t node_count = 0, patch_count = 0, total_size = 0;
size_t max_size = feature_dims[0];
Tree2ColUtil::construct_tree(EdgeSet_cpu, &tr, &node_count);
std::vector<std::vector<Node>> processing_list;
for (size_t u = 1; u <= node_count; u++) {
std::vector<Node> tmp = Tree2ColUtil::construct_patch(u, max_depth, tr);
if (!tmp.empty()) {
processing_list.push_back(tmp);
total_size += tmp.size();
}
}
size_t patch_size = processing_list.size();
Tensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
int* node = node_cpu.mutable_data<int>({static_cast<int64_t>(total_size)},
cpu_place);
T* eta = eta_cpu.mutable_data<T>({static_cast<int64_t>(total_size * 3)},
cpu_place);
int* index = index_cpu.mutable_data<int>(
{static_cast<int64_t>(patch_size * 2)}, cpu_place);
int idx = 0, index_idx = 0;
for (auto& tmp : processing_list) {
index[index_idx++] = idx;
for (auto& v : tmp) {
node[idx] = static_cast<int>(v.node - 1);
eta[idx * 3] = v.eta_l<T>(max_depth);
eta[idx * 3 + 1] = v.eta_r<T>(max_depth);
eta[idx * 3 + 2] = v.eta_t<T>(max_depth);
idx++;
}
index[index_idx++] = idx;
}
framework::TensorCopy(node_cpu, gpu_place, context, &node_gpu);
framework::TensorCopy(eta_cpu, gpu_place, context, &eta_gpu);
framework::TensorCopy(index_cpu, gpu_place, context, &index_gpu);
int elem_size = patch_size * feature_size;
int blocks = (elem_size + 1024 - 1) / 1024;
int block_x = 512;
int block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
patch->mutable_data<T>(
{static_cast<int64_t>(max_size), static_cast<int64_t>(patch_elem_size)},
gpu_place);
constant(context, patch, 0);
tree2col<T><<<grid, threads, 0, stream>>>(
eta_gpu.data<T>(), node_gpu.data<int>(), index_gpu.data<int>(),
node_features.data<T>(), patch->data<T>(), feature_size, patch_size);
}
};
template <typename T>
class Col2TreeFunctor<platform::CUDADeviceContext, T> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& EdgeSet,
const framework::Tensor& patch_grad,
framework::Tensor* embedding_grad, int max_depth) {
std::vector<std::vector<int>> tr;
auto gpu_place = context.GetPlace();
auto cpu_place = platform::CPUPlace();
auto stream = context.stream();
auto output_dims = patch_grad.dims();
phi::funcs::SetConstant<platform::CUDADeviceContext, T> constant;
Tensor EdgeSet_cpu;
framework::TensorCopy(EdgeSet, cpu_place, &EdgeSet_cpu);
int64_t output_size = output_dims[1];
size_t patch_elem_size = 3 * static_cast<size_t>(output_size);
size_t node_count = 0, patch_count = 0;
size_t max_size = output_dims[0];
Tree2ColUtil::construct_tree(EdgeSet_cpu, &tr, &node_count);
std::vector<std::vector<Node>> processing_list;
std::vector<std::vector<Node>> grad_list;
grad_list.resize(node_count);
size_t total_size = 0, grad_size = node_count;
for (size_t u = 1; u <= node_count; u++) {
std::vector<Node> tmp = Tree2ColUtil::construct_patch(u, max_depth, tr);
if (!tmp.empty()) {
processing_list.push_back(tmp);
}
}
for (size_t patch_id = 0; patch_id < processing_list.size(); patch_id++) {
for (auto v : processing_list[patch_id]) {
grad_list[v.get_node() - 1].push_back(v.change_node(patch_id + 1));
}
}
for (auto& tmp : grad_list) {
total_size += tmp.size();
}
Tensor node_cpu, node_gpu, eta_cpu, eta_gpu, index_cpu, index_gpu;
int* node = node_cpu.mutable_data<int>({static_cast<int64_t>(total_size)},
cpu_place);
T* eta = eta_cpu.mutable_data<T>({static_cast<int64_t>(total_size * 3)},
cpu_place);
int* index = index_cpu.mutable_data<int>(
{static_cast<int64_t>(grad_size * 2)}, cpu_place);
size_t idx = 0, index_idx = 0;
for (auto& tmp : grad_list) {
index[index_idx++] = idx;
for (auto& v : tmp) {
node[idx] = static_cast<int>(v.node - 1);
eta[idx * 3] = v.eta_l<T>(max_depth);
eta[idx * 3 + 1] = v.eta_r<T>(max_depth);
eta[idx * 3 + 2] = v.eta_t<T>(max_depth);
idx++;
}
index[index_idx++] = idx;
}
framework::TensorCopy(node_cpu, gpu_place, &node_gpu);
framework::TensorCopy(eta_cpu, gpu_place, &eta_gpu);
framework::TensorCopy(index_cpu, gpu_place, &index_gpu);
int elem_size = output_size * grad_size;
int blocks = (elem_size + 1024 - 1) / 1024;
int block_x = 512;
int block_y = (blocks + 512 - 1) / 512;
dim3 threads(1024, 1);
dim3 grid(block_x, block_y);
embedding_grad->mutable_data<T>(
{static_cast<int64_t>(max_size), static_cast<int64_t>(patch_elem_size)},
gpu_place);
constant(context, embedding_grad, 0);
tree2col<T><<<grid, threads, 0, stream>>>(
eta_gpu.data<T>(), node_gpu.data<int>(), index_gpu.data<int>(),
patch_grad.data<T>(), embedding_grad->data<T>(), output_size,
grad_size);
}
};
template class Tree2ColFunctor<platform::CUDADeviceContext, float>;
template class Tree2ColFunctor<platform::CUDADeviceContext, double>;
template class Col2TreeFunctor<platform::CUDADeviceContext, float>;
template class Col2TreeFunctor<platform::CUDADeviceContext, double>;
} // namespace math
} // namespace operators
} // namespace paddle
|
b86d33b0dba109bcaa0cd97c3117a7a5cc33e914.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// RUN: %clang_cc1 -no-opaque-pointers -triple nvptx64-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
//
// RUN: %clang_cc1 -no-opaque-pointers -triple nvptx-unknown-unknown -target-cpu sm_50 \
// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
// We have to keep all builtins that depend on particular target feature in the
// same function, because the codegen will stop after the very first function
// that encounters an error, so -verify will not be able to find errors in
// subsequent functions.
// CHECK-LABEL: test_fn
__device__ void test_fn(double d, double* double_ptr) {
// CHECK: atomicrmw fadd double* {{.*}} seq_cst, align 8
// expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature sm_60}}
__nvvm_atom_add_gen_d(double_ptr, d);
}
| b86d33b0dba109bcaa0cd97c3117a7a5cc33e914.cu | // RUN: %clang_cc1 -no-opaque-pointers -triple nvptx64-unknown-unknown -target-cpu sm_60 \
// RUN: -fcuda-is-device -S -emit-llvm -o - -x cuda %s \
// RUN: | FileCheck -check-prefix=CHECK %s
//
// RUN: %clang_cc1 -no-opaque-pointers -triple nvptx-unknown-unknown -target-cpu sm_50 \
// RUN: -fcuda-is-device -S -o /dev/null -x cuda -verify %s
#define __device__ __attribute__((device))
#define __global__ __attribute__((global))
#define __shared__ __attribute__((shared))
#define __constant__ __attribute__((constant))
// We have to keep all builtins that depend on particular target feature in the
// same function, because the codegen will stop after the very first function
// that encounters an error, so -verify will not be able to find errors in
// subsequent functions.
// CHECK-LABEL: test_fn
__device__ void test_fn(double d, double* double_ptr) {
// CHECK: atomicrmw fadd double* {{.*}} seq_cst, align 8
// expected-error@+1 {{'__nvvm_atom_add_gen_d' needs target feature sm_60}}
__nvvm_atom_add_gen_d(double_ptr, d);
}
|
ada80002061eb754abb22982e1b9a869f64c21a2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
if (comp <= (var_4 - var_5 + (-1.4965E-35f + +1.1260E34f))) {
if (comp >= +1.7005E34f + var_6 - var_7 - +1.7973E-36f - floorf(-1.0959E34f + +1.7362E-43f)) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = (var_8 * var_9 - +1.7597E36f + +1.4203E-37f);
float tmp_2 = -1.5612E34f;
comp = tmp_2 * tmp_1 - ldexpf(-1.0539E34f - var_10, 2);
for (int i=0; i < var_2; ++i) {
float tmp_3 = (var_11 - expf(+1.4455E34f * (+1.9387E8f + +1.6840E36f)));
float tmp_4 = +1.7951E-11f;
comp += tmp_4 * tmp_3 / -1.0912E-37f + -1.5211E36f;
comp += (-1.5596E35f - (+1.1210E-37f * var_12 - (var_13 * var_14)));
}
if (comp < (var_15 + var_16 / (var_17 * (+1.5478E-16f + logf((-0.0f * var_18 * +1.8467E-44f / (var_19 * var_20))))))) {
comp += +1.9018E-44f / var_21;
}
for (int i=0; i < var_3; ++i) {
comp = powf(+1.4150E-43f, -1.4939E-41f / ldexpf((var_22 * +1.1751E-6f * (-1.3135E-43f + var_23 + +1.1912E-35f)), 2));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
hipDeviceSynchronize();
return 0;
}
| ada80002061eb754abb22982e1b9a869f64c21a2.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, int var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12,float var_13,float var_14,float var_15,float var_16,float var_17,float var_18,float var_19,float var_20,float var_21,float var_22,float var_23) {
if (comp <= (var_4 - var_5 + (-1.4965E-35f + +1.1260E34f))) {
if (comp >= +1.7005E34f + var_6 - var_7 - +1.7973E-36f - floorf(-1.0959E34f + +1.7362E-43f)) {
for (int i=0; i < var_1; ++i) {
float tmp_1 = (var_8 * var_9 - +1.7597E36f + +1.4203E-37f);
float tmp_2 = -1.5612E34f;
comp = tmp_2 * tmp_1 - ldexpf(-1.0539E34f - var_10, 2);
for (int i=0; i < var_2; ++i) {
float tmp_3 = (var_11 - expf(+1.4455E34f * (+1.9387E8f + +1.6840E36f)));
float tmp_4 = +1.7951E-11f;
comp += tmp_4 * tmp_3 / -1.0912E-37f + -1.5211E36f;
comp += (-1.5596E35f - (+1.1210E-37f * var_12 - (var_13 * var_14)));
}
if (comp < (var_15 + var_16 / (var_17 * (+1.5478E-16f + logf((-0.0f * var_18 * +1.8467E-44f / (var_19 * var_20))))))) {
comp += +1.9018E-44f / var_21;
}
for (int i=0; i < var_3; ++i) {
comp = powf(+1.4150E-43f, -1.4939E-41f / ldexpf((var_22 * +1.1751E-6f * (-1.3135E-43f + var_23 + +1.1912E-35f)), 2));
}
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
int tmp_2 = atoi(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
float tmp_14 = atof(argv[14]);
float tmp_15 = atof(argv[15]);
float tmp_16 = atof(argv[16]);
float tmp_17 = atof(argv[17]);
float tmp_18 = atof(argv[18]);
float tmp_19 = atof(argv[19]);
float tmp_20 = atof(argv[20]);
float tmp_21 = atof(argv[21]);
float tmp_22 = atof(argv[22]);
float tmp_23 = atof(argv[23]);
float tmp_24 = atof(argv[24]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13,tmp_14,tmp_15,tmp_16,tmp_17,tmp_18,tmp_19,tmp_20,tmp_21,tmp_22,tmp_23,tmp_24);
cudaDeviceSynchronize();
return 0;
}
|
2113eaf92aef2f6a7abfe878e1f3e68c6880e224.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/hip/HIPContext.h>
#include <THH/THHGeneral.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHAtomics.cuh> // for gpuAtomicAdd
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
namespace {
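// start_index/end_index give the half-open input range [start, end) that
// output bin a (of b bins) covers over an input dimension of size c.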
__device__ inline int start_index(int a, int b, int c) {
return (int)::floor((float)(a * c) / b);
}
__device__ inline int end_index(int a, int b, int c) {
return (int)::ceil((float)((a + 1) * c) / b);
}
// 5d tensor B x D x T x H x W
// All kernels view batch dim B and dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 5D tensor along dimensions
* 2, 3, and 4 5D input, 5D output
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*/
template <typename scalar_t, typename accscalar_t>
__global__ void adaptiveaveragepool(
scalar_t *input, scalar_t *output,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD,
int64_t istrideT, int64_t istrideH, int64_t istrideW,
int64_t offsetZ) {
// iterates on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // slice/feature
// input frame/time range is fixed.
int istartT = start_index(ot, osizeT, isizeT);
int iendT = end_index(ot, osizeT, isizeT);
int kT = iendT - istartT;
// input offset by slice/feature and earliest relevant frame/time
scalar_t *input_dt = input + d*istrideD + istartT*istrideT;
// output offset by slice/feature and frame/time
scalar_t *output_dt = output + o_plane*osizeH*osizeW;
// For all output pixels...
for (oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = start_index(oh, osizeH, isizeH);
int iendH = end_index(oh, osizeH, isizeH);
int kH = iendH - istartH;
for (ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = start_index(ow, osizeW, isizeW);
int iendW = end_index(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling from corresponding input pixels
scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
scalar_t *ptr_output = output_dt + oh*osizeW + ow;
accscalar_t sum = static_cast<accscalar_t>(0);
int it, ih, iw;
for (it = 0; it < kT; ++it) {
for (ih = 0; ih < kH; ++ih) {
for (iw = 0; iw < kW; ++iw) {
scalar_t val = ptr_input[ih*istrideH + iw*istrideW];
sum += static_cast<accscalar_t>(val);
}
}
ptr_input += istrideT; // next input frame
}
// Update output
const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW);
*ptr_output = static_cast<scalar_t>(sum / divide_factor);
}
}
}
template <typename scalar_t, typename accscalar_t>
void adaptiveaveragepool_loop(
scalar_t *input_data, scalar_t *output_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = ::max((int)(16L / totalZ), 1);
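  // Launch at most 65535 (slice, frame) planes along gridDim.x per call; any
  // remaining planes are handled by further launches, with offsetZ advancing
  // the starting plane index.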
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
hipLaunchKernelGGL(( adaptiveaveragepool<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
input_data, output_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
istrideD,
istrideT, istrideH, istrideW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*/
template <typename scalar_t, typename accscalar_t>
__global__ void adaptiveaveragegradinput(
scalar_t *gradInput, scalar_t *gradOutput,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t offsetZ)
{
// iterators on input pixels
int it, ih, iw;
// compute offsets based on thread/block ID
int istartH = blockIdx.y * blockDim.y + threadIdx.y;
int iendH = isizeH;
int istepH = gridDim.y * blockDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// select input plane
int64_t i_plane = blockIdx.x + offsetZ;
  it = i_plane % isizeT; // input frame/time
int d = i_plane / isizeT; // slice/feature
// output frame/time range is fixed.
int ostartT = start_index(it, isizeT, osizeT);
int oendT = end_index(it, isizeT, osizeT);
// gradInput offset by slice/feature and frame/time.
scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW;
// gradOutput offset by slice/feature and earliest relevant frame/time
scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW;
// For all input pixels...
for (ih = istartH; ih < iendH; ih += istepH) {
int ostartH = start_index(ih, isizeH, osizeH);
int oendH = end_index(ih, isizeH, osizeH);
for (iw = istartW; iw < iendW; iw += istepW) {
int ostartW = start_index(iw, isizeW, osizeW);
int oendW = end_index(iw, isizeW, osizeW);
// Compute the gradients from corresponding output pixels
scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw;
scalar_t *ptr_gradOutput = gradOutput_dt;
// for all relevant output pixels
int ot, oh, ow;
for (ot = ostartT; ot < oendT; ++ot) {
int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT);
for (oh = ostartH; oh < oendH; ++oh) {
int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH);
for (ow = ostartW; ow < oendW; ++ow) {
int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW);
const accscalar_t divide_factor = kW * kH * kT;
accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor);
*ptr_gradInput += static_cast<scalar_t>(grad_delta);
}
}
ptr_gradOutput += osizeH*osizeW; // next output frame
}
}
}
}
template <typename scalar_t, typename accscalar_t>
void adaptiveaveragegradinput_loop(
scalar_t *gradInput_data, scalar_t *gradOutput_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = ::max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
hipLaunchKernelGGL(( adaptiveaveragegradinput<scalar_t, accscalar_t>)
, dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* (uses atomic add)
*
*/
template <typename scalar_t>
__global__ void atomicadaptiveaveragegradinput(
scalar_t *gradInput, scalar_t *gradOutput,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t offsetZ)
{
// iterators on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // output slice/feature
// input frame/time range is fixed.
int istartT = start_index(ot, osizeT, isizeT);
int iendT = end_index(ot, osizeT, isizeT);
int kT = iendT - istartT;
// gradInput offset by slice/feature and earliest relevant frame/time
scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW;
// gradOutput offset by slice/feature and frame/time
scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW;
// For all output pixels...
for (oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = start_index(oh, osizeH, isizeH);
int iendH = end_index(oh, osizeH, isizeH);
int kH = iendH - istartH;
for (ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = start_index(ow, osizeW, isizeW);
int iendW = end_index(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients from corresponding input pixels
scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW;
scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow;
scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW;
int it, ih, iw;
for (it = 0; it < kT; ++it) {
for (ih = 0; ih < kH; ++ih) {
for (iw = 0; iw < kW; ++iw) {
gpuAtomicAdd(&(ptr_gradInput[ih*isizeW + iw]), grad_delta);
}
}
ptr_gradInput += isizeH*isizeW; // next input frame
}
}
}
}
template <typename scalar_t>
void atomicadaptiveaveragegradinput_loop(
scalar_t* gradInput_data, scalar_t* gradOutput_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
int blocksH = ::max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
hipLaunchKernelGGL(( atomicadaptiveaveragegradinput), dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
gradInput_data, gradOutput_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
offsetZ);
C10_HIP_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
// 5D tensor B x D x T x H x W
void adaptive_avg_pool3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef& output_size) {
TensorArg output_arg{output, "output", 1};
TensorArg input_arg{input_, "input_", 2};
checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg});
for (int64_t i = 0; i < input_.ndimension(); i++) {
TORCH_CHECK(
input_.size(i) > 0,
"adaptive_avg_pool3d_cuda(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input_.sizes(),
" with dimension ", i, " being empty");
}
TORCH_CHECK(
(input_.ndimension() == 4 || input_.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
// the jit sometimes passes output_size.size() == 1
TORCH_CHECK(
output_size.size() == 1 || output_size.size() == 3,
"adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3");
int64_t osizeT = output_size[0];
int64_t osizeH = output_size[1];
int64_t osizeW = output_size[2];
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
const Tensor& input = input_.ndimension() == 4 ? input_ : input_.contiguous();
if (input.ndimension() == 4) {
sizeD = input.size(0);
isizeT = input.size(1);
isizeH = input.size(2);
isizeW = input.size(3);
istrideD = input.stride(0);
istrideT = input.stride(1);
istrideH = input.stride(2);
istrideW = input.stride(3);
output.resize_({sizeD, osizeT, osizeH, osizeW});
totalZ = sizeD * osizeT;
} else {
int64_t sizeB = input.size(0);
sizeD = input.size(1);
isizeT = input.size(2);
isizeH = input.size(3);
isizeW = input.size(4);
istrideD = input.stride(1);
istrideT = input.stride(2);
istrideH = input.stride(3);
istrideW = input.stride(4);
output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});
totalZ = sizeB * sizeD * osizeT;
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* output_data = output.data_ptr<scalar_t>();
adaptiveaveragepool_loop<scalar_t, accscalar_t>(
input_data, output_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
istrideD, istrideT, istrideH, istrideW);
});
}
void adaptive_avg_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input) {
TensorArg grad_input_arg{gradInput, "gradInput", 1};
TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2};
TensorArg input_arg{input, "input", 3};
checkAllSameGPU(
"adaptive_avg_pool3d_out_cuda",
{grad_input_arg, grad_output_arg, input_arg});
const Tensor gradOutput = gradOutput_.contiguous();
gradInput.resize_as_(input);
gradInput.zero_();
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input.ndimension() == 4) {
sizeD = input.size(0);
isizeT = input.size(1);
isizeH = input.size(2);
isizeW = input.size(3);
osizeT = gradOutput.size(1);
osizeH = gradOutput.size(2);
osizeW = gradOutput.size(3);
} else {
sizeD = input.size(1);
isizeT = input.size(2);
isizeH = input.size(3);
isizeW = input.size(4);
osizeT = gradOutput.size(2);
osizeH = gradOutput.size(3);
osizeW = gradOutput.size(4);
}
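  // When every input extent is an exact multiple of the output extent, the
  // pooling-window mapping can be inverted exactly and the deterministic
  // gather kernel is used; otherwise gradients are scattered with atomicAdd.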
bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
if (input.ndimension() == 4) {
totalZ = atomic ? sizeD * osizeT : sizeD * isizeT;
} else {
int sizeB = input.size(0);
totalZ = atomic ? sizeB * sizeD * osizeT : sizeB * sizeD * isizeT;
}
if (atomic) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] {
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();
atomicadaptiveaveragegradinput_loop(
gradInput_data, gradOutput_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();
adaptiveaveragegradinput_loop<scalar_t, accscalar_t>(
gradInput_data, gradOutput_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW);
});
}
}
} // namespace
Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input,
IntArrayRef output_size,
Tensor& output) {
adaptive_avg_pool3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor adaptive_avg_pool3d_cuda(
const Tensor& input,
IntArrayRef output_size) {
auto output = at::empty({0}, input.options());
adaptive_avg_pool3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_,
const Tensor& input,
Tensor& gradInput) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda");
adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input);
return gradInput;
}
Tensor adaptive_avg_pool3d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input);
return gradInput;
}
} // namespace native
} // namespace at
| 2113eaf92aef2f6a7abfe878e1f3e68c6880e224.cu | #include <ATen/ATen.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/cuda/CUDAContext.h>
#include <THC/THCGeneral.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCAtomics.cuh> // for gpuAtomicAdd
#include <c10/util/Exception.h>
#include <algorithm>
#include <cfloat>
#include <cmath>
namespace at {
namespace native {
namespace {
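// start_index/end_index give the half-open input range [start, end) that
// output bin a (of b bins) covers over an input dimension of size c.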
__device__ inline int start_index(int a, int b, int c) {
return (int)std::floor((float)(a * c) / b);
}
__device__ inline int end_index(int a, int b, int c) {
return (int)std::ceil((float)((a + 1) * c) / b);
}
// 5d tensor B x D x T x H x W
// All kernels view batch dim B and dim D as collapsed.
/*
* Description:
* this function adaptively average pools an input 5D tensor along dimensions
* 2, 3, and 4 5D input, 5D output
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*/
template <typename scalar_t, typename accscalar_t>
__global__ void adaptiveaveragepool(
scalar_t *input, scalar_t *output,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD,
int64_t istrideT, int64_t istrideH, int64_t istrideW,
int64_t offsetZ) {
// iterates on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // slice/feature
// input frame/time range is fixed.
int istartT = start_index(ot, osizeT, isizeT);
int iendT = end_index(ot, osizeT, isizeT);
int kT = iendT - istartT;
// input offset by slice/feature and earliest relevant frame/time
scalar_t *input_dt = input + d*istrideD + istartT*istrideT;
// output offset by slice/feature and frame/time
scalar_t *output_dt = output + o_plane*osizeH*osizeW;
// For all output pixels...
for (oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = start_index(oh, osizeH, isizeH);
int iendH = end_index(oh, osizeH, isizeH);
int kH = iendH - istartH;
for (ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = start_index(ow, osizeW, isizeW);
int iendW = end_index(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the average pooling from corresponding input pixels
scalar_t *ptr_input = input_dt + istartH*istrideH + istartW*istrideW;
scalar_t *ptr_output = output_dt + oh*osizeW + ow;
accscalar_t sum = static_cast<accscalar_t>(0);
int it, ih, iw;
for (it = 0; it < kT; ++it) {
for (ih = 0; ih < kH; ++ih) {
for (iw = 0; iw < kW; ++iw) {
scalar_t val = ptr_input[ih*istrideH + iw*istrideW];
sum += static_cast<accscalar_t>(val);
}
}
ptr_input += istrideT; // next input frame
}
// Update output
const accscalar_t divide_factor = static_cast<accscalar_t>(kT * kH * kW);
*ptr_output = static_cast<scalar_t>(sum / divide_factor);
}
}
}
template <typename scalar_t, typename accscalar_t>
void adaptiveaveragepool_loop(
scalar_t *input_data, scalar_t *output_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t istrideD, int64_t istrideT, int64_t istrideH, int64_t istrideW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = std::max((int)(16L / totalZ), 1);
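  // Launch at most 65535 (slice, frame) planes along gridDim.x per call; any
  // remaining planes are handled by further launches, with offsetZ advancing
  // the starting plane index.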
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
adaptiveaveragepool<scalar_t, accscalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
input_data, output_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
istrideD,
istrideT, istrideH, istrideW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*/
template <typename scalar_t, typename accscalar_t>
__global__ void adaptiveaveragegradinput(
scalar_t *gradInput, scalar_t *gradOutput,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t offsetZ)
{
// iterators on input pixels
int it, ih, iw;
// compute offsets based on thread/block ID
int istartH = blockIdx.y * blockDim.y + threadIdx.y;
int iendH = isizeH;
int istepH = gridDim.y * blockDim.y;
int istartW = threadIdx.x;
int iendW = isizeW;
int istepW = blockDim.x;
// select input plane
int64_t i_plane = blockIdx.x + offsetZ;
  it = i_plane % isizeT; // input frame/time
int d = i_plane / isizeT; // slice/feature
// output frame/time range is fixed.
int ostartT = start_index(it, isizeT, osizeT);
int oendT = end_index(it, isizeT, osizeT);
// gradInput offset by slice/feature and frame/time.
scalar_t *gradInput_dt = gradInput + i_plane*isizeH*isizeW;
// gradOutput offset by slice/feature and earliest relevant frame/time
scalar_t *gradOutput_dt = gradOutput + (d*osizeT + ostartT)*osizeH*osizeW;
// For all input pixels...
for (ih = istartH; ih < iendH; ih += istepH) {
int ostartH = start_index(ih, isizeH, osizeH);
int oendH = end_index(ih, isizeH, osizeH);
for (iw = istartW; iw < iendW; iw += istepW) {
int ostartW = start_index(iw, isizeW, osizeW);
int oendW = end_index(iw, isizeW, osizeW);
// Compute the gradients from corresponding output pixels
scalar_t *ptr_gradInput = gradInput_dt + ih*isizeW + iw;
scalar_t *ptr_gradOutput = gradOutput_dt;
// for all relevant output pixels
int ot, oh, ow;
for (ot = ostartT; ot < oendT; ++ot) {
int kT = end_index(ot, osizeT, isizeT) - start_index(ot, osizeT, isizeT);
for (oh = ostartH; oh < oendH; ++oh) {
int kH = end_index(oh, osizeH, isizeH) - start_index(oh, osizeH, isizeH);
for (ow = ostartW; ow < oendW; ++ow) {
int kW = end_index(ow, osizeW, isizeW) - start_index(ow, osizeW, isizeW);
const accscalar_t divide_factor = kW * kH * kT;
accscalar_t grad_delta = static_cast<accscalar_t>(ptr_gradOutput[oh*osizeW + ow] / divide_factor);
*ptr_gradInput += static_cast<scalar_t>(grad_delta);
}
}
ptr_gradOutput += osizeH*osizeW; // next output frame
}
}
}
}
template <typename scalar_t, typename accscalar_t>
void adaptiveaveragegradinput_loop(
scalar_t *gradInput_data, scalar_t *gradOutput_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
// each H*W plane is processed by blocksH thread blocks
int blocksH = std::max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
adaptiveaveragegradinput<scalar_t, accscalar_t>
<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInput_data, gradOutput_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
/*
* Description:
* This function computes the gradInput from gradOutput.
*
* gridDim.y blocks work together on a single 2D output plane specified by
* (blockIdx.x + offsetZ).
*
* (uses atomic add)
*
*/
template <typename scalar_t>
__global__ void atomicadaptiveaveragegradinput(
scalar_t *gradInput, scalar_t *gradOutput,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW,
int64_t offsetZ)
{
// iterators on output pixels
int ot, oh, ow;
// compute offsets based on thread/block ID
int ostartH = blockIdx.y * blockDim.y + threadIdx.y;
int oendH = osizeH;
int ostepH = gridDim.y * blockDim.y;
int ostartW = threadIdx.x;
int oendW = osizeW;
int ostepW = blockDim.x;
// select output plane
int64_t o_plane = blockIdx.x + offsetZ;
ot = o_plane % osizeT; // output frame/time
int d = o_plane / osizeT; // output slice/feature
// input frame/time range is fixed.
int istartT = start_index(ot, osizeT, isizeT);
int iendT = end_index(ot, osizeT, isizeT);
int kT = iendT - istartT;
// gradInput offset by slice/feature and earliest relevant frame/time
scalar_t *gradInput_nt = gradInput + (d*isizeT + istartT)*isizeH*isizeW;
// gradOutput offset by slice/feature and frame/time
scalar_t *gradOutput_nt = gradOutput + o_plane*osizeH*osizeW;
// For all output pixels...
for (oh = ostartH; oh < oendH; oh += ostepH) {
int istartH = start_index(oh, osizeH, isizeH);
int iendH = end_index(oh, osizeH, isizeH);
int kH = iendH - istartH;
for (ow = ostartW; ow < oendW; ow += ostepW) {
int istartW = start_index(ow, osizeW, isizeW);
int iendW = end_index(ow, osizeW, isizeW);
int kW = iendW - istartW;
// Compute the gradients from corresponding input pixels
scalar_t *ptr_gradInput = gradInput_nt + istartH*isizeW + istartW;
scalar_t *ptr_gradOutput = gradOutput_nt + oh*osizeW + ow;
scalar_t grad_delta = *ptr_gradOutput / kT / kH / kW;
int it, ih, iw;
for (it = 0; it < kT; ++it) {
for (ih = 0; ih < kH; ++ih) {
for (iw = 0; iw < kW; ++iw) {
gpuAtomicAdd(&(ptr_gradInput[ih*isizeW + iw]), grad_delta);
}
}
ptr_gradInput += isizeH*isizeW; // next input frame
}
}
}
}
template <typename scalar_t>
void atomicadaptiveaveragegradinput_loop(
scalar_t* gradInput_data, scalar_t* gradOutput_data,
int64_t totalZ,
int isizeT, int isizeH, int isizeW,
int osizeT, int osizeH, int osizeW) {
int64_t offsetZ = 0;
dim3 threads(32, 8);
int blocksH = std::max((int)(16L / totalZ), 1);
while (totalZ > 0) {
dim3 blocks(totalZ > 65535 ? 65535 : totalZ, blocksH);
atomicadaptiveaveragegradinput<<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>(
gradInput_data, gradOutput_data,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
offsetZ);
C10_CUDA_KERNEL_LAUNCH_CHECK();
totalZ -= 65535;
offsetZ += 65535;
}
}
// 5D tensor B x D x T x H x W
void adaptive_avg_pool3d_out_cuda_template(
Tensor& output,
const Tensor& input_,
IntArrayRef& output_size) {
TensorArg output_arg{output, "output", 1};
TensorArg input_arg{input_, "input_", 2};
checkAllSameGPU("adaptive_avg_pool3d_cuda", {output_arg, input_arg});
for (int64_t i = 0; i < input_.ndimension(); i++) {
TORCH_CHECK(
input_.size(i) > 0,
"adaptive_avg_pool3d_cuda(): expected input to have non-empty spatial dimensions, "
"but input has sizes ", input_.sizes(),
" with dimension ", i, " being empty");
}
TORCH_CHECK(
(input_.ndimension() == 4 || input_.ndimension() == 5),
"non-empty 4D or 5D (batch mode) tensor expected for input");
// the jit sometimes passes output_size.size() == 1
TORCH_CHECK(
output_size.size() == 1 || output_size.size() == 3,
"adaptive_avg_pool3d: internal error: output_size.size() must be 1 or 3");
int64_t osizeT = output_size[0];
int64_t osizeH = output_size[1];
int64_t osizeW = output_size[2];
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t istrideD, istrideT, istrideH, istrideW;
int64_t totalZ;
const Tensor& input = input_.ndimension() == 4 ? input_ : input_.contiguous();
if (input.ndimension() == 4) {
sizeD = input.size(0);
isizeT = input.size(1);
isizeH = input.size(2);
isizeW = input.size(3);
istrideD = input.stride(0);
istrideT = input.stride(1);
istrideH = input.stride(2);
istrideW = input.stride(3);
output.resize_({sizeD, osizeT, osizeH, osizeW});
totalZ = sizeD * osizeT;
} else {
int64_t sizeB = input.size(0);
sizeD = input.size(1);
isizeT = input.size(2);
isizeH = input.size(3);
isizeW = input.size(4);
istrideD = input.stride(1);
istrideT = input.stride(2);
istrideH = input.stride(3);
istrideW = input.stride(4);
output.resize_({sizeB, sizeD, osizeT, osizeH, osizeW});
totalZ = sizeB * sizeD * osizeT;
}
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
scalar_t* input_data = input.data_ptr<scalar_t>();
scalar_t* output_data = output.data_ptr<scalar_t>();
adaptiveaveragepool_loop<scalar_t, accscalar_t>(
input_data, output_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW,
istrideD, istrideT, istrideH, istrideW);
});
}
void adaptive_avg_pool3d_backward_out_cuda_template(
Tensor& gradInput,
const Tensor& gradOutput_,
const Tensor& input) {
TensorArg grad_input_arg{gradInput, "gradInput", 1};
TensorArg grad_output_arg{gradOutput_, "gradOutput_", 2};
TensorArg input_arg{input, "input", 3};
checkAllSameGPU(
"adaptive_avg_pool3d_out_cuda",
{grad_input_arg, grad_output_arg, input_arg});
const Tensor gradOutput = gradOutput_.contiguous();
gradInput.resize_as_(input);
gradInput.zero_();
int64_t sizeD, isizeT, isizeH, isizeW;
int64_t osizeT, osizeH, osizeW;
int64_t totalZ;
if (input.ndimension() == 4) {
sizeD = input.size(0);
isizeT = input.size(1);
isizeH = input.size(2);
isizeW = input.size(3);
osizeT = gradOutput.size(1);
osizeH = gradOutput.size(2);
osizeW = gradOutput.size(3);
} else {
sizeD = input.size(1);
isizeT = input.size(2);
isizeH = input.size(3);
isizeW = input.size(4);
osizeT = gradOutput.size(2);
osizeH = gradOutput.size(3);
osizeW = gradOutput.size(4);
}
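  // When every input extent is an exact multiple of the output extent, the
  // pooling-window mapping can be inverted exactly and the deterministic
  // gather kernel is used; otherwise gradients are scattered with atomicAdd.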
bool atomic = (isizeW%osizeW != 0) || (isizeH%osizeH != 0) || (isizeT%osizeT != 0);
if (input.ndimension() == 4) {
totalZ = atomic ? sizeD * osizeT : sizeD * isizeT;
} else {
int sizeB = input.size(0);
totalZ = atomic ? sizeB * sizeD * osizeT : sizeB * sizeD * isizeT;
}
if (atomic) {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] {
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();
atomicadaptiveaveragegradinput_loop(
gradInput_data, gradOutput_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW);
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND2(kHalf, kBFloat16,
input.scalar_type(), "adaptive_avg_pool3d_backward_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
scalar_t* gradInput_data = gradInput.data_ptr<scalar_t>();
scalar_t* gradOutput_data = gradOutput.data_ptr<scalar_t>();
adaptiveaveragegradinput_loop<scalar_t, accscalar_t>(
gradInput_data, gradOutput_data,
totalZ,
isizeT, isizeH, isizeW,
osizeT, osizeH, osizeW);
});
}
}
} // namespace
Tensor& adaptive_avg_pool3d_out_cuda(const Tensor& input,
IntArrayRef output_size,
Tensor& output) {
adaptive_avg_pool3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor adaptive_avg_pool3d_cuda(
const Tensor& input,
IntArrayRef output_size) {
auto output = at::empty({0}, input.options());
adaptive_avg_pool3d_out_cuda_template(output, input, output_size);
return output;
}
Tensor& adaptive_avg_pool3d_backward_out_cuda(const Tensor& gradOutput_,
const Tensor& input,
Tensor& gradInput) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_out_cuda");
adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input);
return gradInput;
}
Tensor adaptive_avg_pool3d_backward_cuda(
const Tensor& gradOutput_,
const Tensor& input) {
// See Note [Writing Nondeterministic Operations]
// Nondeterministic because of atomicAdd usage
globalContext().alertNotDeterministic("adaptive_avg_pool3d_backward_cuda");
auto gradInput = at::zeros_like(input, LEGACY_CONTIGUOUS_MEMORY_FORMAT);
adaptive_avg_pool3d_backward_out_cuda_template(gradInput, gradOutput_, input);
return gradInput;
}
} // namespace native
} // namespace at
|
fedcbf6d345316cece0e7d48b014394a6fb33afd.hip | // !!! This is a file automatically generated by hipify!!!
#include "stdio.h"
int main( void ) {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount( &count );
for (int i=0; i < count; i++) {
hipGetDeviceProperties( &prop, i );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
| fedcbf6d345316cece0e7d48b014394a6fb33afd.cu | #include "stdio.h"
int main( void ) {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount( &count );
for (int i=0; i < count; i++) {
cudaGetDeviceProperties( &prop, i );
printf( " --- General Information for device %d ---\n", i );
printf( "Name: %s\n", prop.name );
printf( "Compute capability: %d.%d\n", prop.major, prop.minor );
printf( "Clock rate: %d\n", prop.clockRate );
printf( "Device copy overlap: " );
if (prop.deviceOverlap)
printf( "Enabled\n" );
else
printf( "Disabled\n");
printf( "Kernel execution timeout : " );
if (prop.kernelExecTimeoutEnabled)
printf( "Enabled\n" );
else
printf( "Disabled\n" );
printf( " --- Memory Information for device %d ---\n", i );
printf( "Total global mem: %ld\n", prop.totalGlobalMem );
printf( "Total constant Mem: %ld\n", prop.totalConstMem );
printf( "Max mem pitch: %ld\n", prop.memPitch );
printf( "Texture Alignment: %ld\n", prop.textureAlignment );
printf( " --- MP Information for device %d ---\n", i );
printf( "Multiprocessor count: %d\n",
prop.multiProcessorCount );
printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock );
printf( "Registers per mp: %d\n", prop.regsPerBlock );
printf( "Threads in warp: %d\n", prop.warpSize );
printf( "Max threads per block: %d\n",
prop.maxThreadsPerBlock );
printf( "Max thread dimensions: (%d, %d, %d)\n",
prop.maxThreadsDim[0], prop.maxThreadsDim[1],
prop.maxThreadsDim[2] );
printf( "Max grid dimensions: (%d, %d, %d)\n",
prop.maxGridSize[0], prop.maxGridSize[1],
prop.maxGridSize[2] );
printf( "\n" );
}
}
|
c11397ad52af8946f6c31f47676c3a3b99f8184c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_Test.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
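    // State variables are stored pitched: row i of the allocation holds
    // variable i for every cell, so variable i of cell sv_id is addressed as
    // *((real *)((char *)sv + pitch * i) + sv_id).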
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
    // the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
//Scenario 2
//real sv11[]={-86.7599490237245,0.00123831208622928,0.784376608695859,0.784218467628080,0.000170016808347696,0.487085364989106,0.00290043259117021,0.999998410220405,1.87270147822737e-08,1.84334654710491e-05,0.999776444937499,1.00727320017378,0.999997421410314,4.09813553215966e-05,1.00091265418338,9.36478320062292,139.974256946572};
//Scenario 3
//real sv11[]={-86.6832615134402,0.00125876883400146,0.782519885686078,0.782385890597164,0.000171886605918564,0.486287153523371,0.00291631476093424,0.999998385692801,1.89678233086951e-08,1.86229043360926e-05,0.999783587315930,1.00721445029128,0.999996850289244,4.23696052205578e-05,0.487079901995765,10.1298949658907,139.478138182002};
//Scenario 4
//real sv11[]={-86.7531659359261,0.00124010826721524,0.784213090011930,0.784063751337305,0.000170184867440439,0.487014769904825,0.00290183337641837,0.999998408105558,1.87481748650298e-08,1.84501422061852e-05,0.999773598689194,1.00768875506436,0.999999512997626,3.10350472687116e-05,1.04650592961489,10.1580626436712,139.167353745914};
//Scenario4_1_106_pop76
//real sv11[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///initial condition
//Scenario 2
real sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
//Scenario 3
//real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272};
//Scenario4
//real sv11[]={-86.7596599603487,0.00123838857632763,0.784369818846026,0.784223148947282,0.000169972136689011,0.487082365294413,0.00290049182352458,0.999998410215409,1.87279005544269e-08,1.84341746908718e-05,0.999781004659642,1.00771223118124,0.999999564103621,3.04673432492567e-05,0.993358298469861,10.1763606222150,139.168522102236};
//Scenario4_1
//real sv11[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};
*((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M
*((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H
*((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J
*((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S
*((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R
*((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D
*((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F
*((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G
*((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
///Scenario 2:
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
///Scenario 3:
//real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05};
///Scenario 4:
//real parameters []={14.6970262149558,2.32527331724419e-05,0.000121747898718481,0.000276971880166082,0.210038991991875,0.120908114803453,0.200498466936257,5.12988959137240,0.0151231713364490,1.26415205898593,1083.02600285230,0.000542147164379904,0.160470068504854,0.0146070055973378,0.00183114105726186,1.00487709573505e-05};
//Scenario4_1_106_pop76
//real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};
real GNa=parameters[0];
real GbNa=parameters[1];
real GCaL=parameters[2];
real GbCa=parameters[3];
real Gto=parameters[4];
real Gkr=parameters[5];
real Gks=parameters[6];
real GK1=parameters[7];
real GpK=parameters[8];
real knak=parameters[9];
real knaca=parameters[10];
real Vmaxup=parameters[11];
real GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
/// real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
/// real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
/// real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
/// real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
/// real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
/// real GNa=14.838;
//Parameters for IbNa
/// real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
/// real knak=1.362;
//Parameters for ICaL
/// real GCaL=0.000175;
//Parameters for IbCa
/// real GbCa=0.000592;
//Parameters for INaCa
/// real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
/// real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
/// real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
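// --- Editor's addition (hedged sketch, not part of the original model file) ---
// The gate updates above all use the Rush-Larsen scheme,
//   x_new = x_inf - (x_inf - x_old) * exp(-dt / tau_x),
// which integrates dx/dt = (x_inf - x)/tau_x exactly over one step for frozen x_inf and tau_x.
// A minimal device helper expressing the same update (the name rush_larsen_step is ours):
inline __device__ real rush_larsen_step(real x_old, real x_inf, real tau_x, real dt_step) {
    return x_inf - (x_inf - x_old) * exp(-dt_step / tau_x);
}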
| c11397ad52af8946f6c31f47676c3a3b99f8184c.cu |
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_Test.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells to solve is passed when we are using and adapative mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
//Scenario 2
//real sv11[]={-86.7599490237245,0.00123831208622928,0.784376608695859,0.784218467628080,0.000170016808347696,0.487085364989106,0.00290043259117021,0.999998410220405,1.87270147822737e-08,1.84334654710491e-05,0.999776444937499,1.00727320017378,0.999997421410314,4.09813553215966e-05,1.00091265418338,9.36478320062292,139.974256946572};
//Scenario 3
//real sv11[]={-86.6832615134402,0.00125876883400146,0.782519885686078,0.782385890597164,0.000171886605918564,0.486287153523371,0.00291631476093424,0.999998385692801,1.89678233086951e-08,1.86229043360926e-05,0.999783587315930,1.00721445029128,0.999996850289244,4.23696052205578e-05,0.487079901995765,10.1298949658907,139.478138182002};
//Scenario 4
//real sv11[]={-86.7531659359261,0.00124010826721524,0.784213090011930,0.784063751337305,0.000170184867440439,0.487014769904825,0.00290183337641837,0.999998408105558,1.87481748650298e-08,1.84501422061852e-05,0.999773598689194,1.00768875506436,0.999999512997626,3.10350472687116e-05,1.04650592961489,10.1580626436712,139.167353745914};
//Scenario4_1_106_pop76
//real sv11[]={-86.6337556349546,0.00127215057254844,0.781315329700828,0.781192702879389,0.000173232959601247,0.485771934772721,0.00292661184320977,0.999998369627955,1.91248713554218e-08,1.87462257542883e-05,0.999765973534775,1.00688195901693,0.999991331074147,5.01588072510622e-05,0.719318246052902,9.82154696449291,139.637347751159};
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
///initial condition
//Scenario 2
real sv11[]={-86.7787928226268,0.00123339508649700,0.784831144233936,0.784673023102172,0.000169405106163081,0.487281523786458,0.00289654265697758,0.999998418745548,1.86681673058670e-08,1.83872100639159e-05,0.999777546403090,1.00731261455043,0.999997755681027,4.00467125306598e-05,0.953040239833913,9.39175391367938,139.965667493392};
//Scenario 3
//real sv11[]={-86.6902768323595,0.00125688376225555,0.782690257165761,0.782547892596001,0.000171750048746746,0.486360170563085,0.00291485827479809,0.999998387931464,1.89456679295569e-08,1.86054940017131e-05,0.999770742626069,1.00724037170339,0.999997113579370,4.17567836043613e-05,0.472458747863693,10.1478189383772,139.471917130272};
//Scenario4
//real sv11[]={-86.7596599603487,0.00123838857632763,0.784369818846026,0.784223148947282,0.000169972136689011,0.487082365294413,0.00290049182352458,0.999998410215409,1.87279005544269e-08,1.84341746908718e-05,0.999781004659642,1.00771223118124,0.999999564103621,3.04673432492567e-05,0.993358298469861,10.1763606222150,139.168522102236};
//Scenario4_1
//real sv11[]={-86.6404915792850,0.00127032163211322,0.781479753157976,0.781360816517016,0.000172969600594225,0.485842045427499,0.00292520813217015,0.999998371823369,1.91034113695031e-08,1.87293970187045e-05,0.999771221267447,1.00691525856031,0.999992103392003,4.93846276389813e-05,0.695256716079829,9.83880114557068,139.633017313049};
*((real * )((char *) sv + pitch * 0) + threadID) =sv11[0]; // V; millivolt
*((real * )((char *) sv + pitch * 1) + threadID) =sv11[1]; //M
*((real * )((char *) sv + pitch * 2) + threadID) =sv11[2]; //H
*((real * )((char *) sv + pitch * 3) + threadID) = sv11[3]; //J
*((real * )((char *) sv + pitch * 4) + threadID) =sv11[4]; //Xr1
*((real * )((char *) sv + pitch * 5) + threadID) =sv11[5]; //Xr2
*((real * )((char *) sv + pitch * 6) + threadID) = sv11[6]; //Xs
*((real * )((char *) sv + pitch * 7) + threadID) =sv11[7]; //S
*((real * )((char *) sv + pitch * 8) + threadID) =sv11[8]; //R
*((real * )((char *) sv + pitch * 9) + threadID) =sv11[9]; //D
*((real * )((char *) sv + pitch * 10) + threadID) =sv11[10]; //F
*((real * )((char *) sv + pitch * 11) + threadID) =sv11[11]; //FCa
*((real * )((char *) sv + pitch * 12) + threadID) =sv11[12]; //G
*((real * )((char *) sv + pitch * 13) + threadID) = sv11[13]; //Cai
*((real * )((char *) sv + pitch * 14) + threadID) =sv11[14]; //CaSR
*((real * )((char *) sv + pitch * 15) + threadID) = sv11[15]; //Nai
*((real * )((char *) sv + pitch * 16) + threadID) = sv11[16]; //Ki
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
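// --- Editor's note (hedged sketch, not part of the original file) ---
// The state vector lives in pitched device memory: row i of the allocation holds state
// variable i for every cell, so variable `var` of cell `sv_id` is addressed as
// ((real*)((char*)sv + pitch * var))[sv_id]. A small accessor making that layout explicit
// (the name sv_at is ours):
static inline __device__ real& sv_at(real *sv, size_t pitch_bytes, int var, int sv_id) {
    return *((real*)((char*)sv + pitch_bytes * var) + sv_id);
}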
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
///Scenario 2:
real parameters []={13.7730247891532,0.000208550376791424,0.000166345602997405,0.000314427207496467,0.272150547490643,0.206045798160674,0.134878222351137,2.91860118931279,0.0222099400341836,2.12194476134155,1099.53480175178,0.000604923870766662,0.118384383617544,0.0193733747777405,0.00390066599158743,2.21704721596155e-05};
///Scenario 3:
//real parameters []={14.2265776064284,0.000280045021984329,0.000123702304592752,0.000251556675811958,0.224623739779267,0.145045477736859,0.132102752427711,4.42712254301024,0.0156948843567210,1.61691730440283,1100,0.000520888772463349,0.258756467150201,0.0191544497099730,0.00137164828832637,4.52996729499983e-05};
///Scenario 4:
//real parameters []={14.6970262149558,2.32527331724419e-05,0.000121747898718481,0.000276971880166082,0.210038991991875,0.120908114803453,0.200498466936257,5.12988959137240,0.0151231713364490,1.26415205898593,1083.02600285230,0.000542147164379904,0.160470068504854,0.0146070055973378,0.00183114105726186,1.00487709573505e-05};
//Scenario4_1_106_pop76
//real parameters []={14.4701107547473,0.000162061905578968,0.000188488521383406,0.000572929459830166,0.335244898151308,0.119541023695594,0.248924317567785,5.19603253018384,0.0221271053316735,2.03169412747953,1099.72574265209,0.000483122952800270,0.478907546954075,0.0199668557152203,0.00562797831559110,3.64128969863145e-05};
real GNa=parameters[0];
real GbNa=parameters[1];
real GCaL=parameters[2];
real GbCa=parameters[3];
real Gto=parameters[4];
real Gkr=parameters[5];
real Gks=parameters[6];
real GK1=parameters[7];
real GpK=parameters[8];
real knak=parameters[9];
real knaca=parameters[10];
real Vmaxup=parameters[11];
real GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
/// real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
/// real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
/// real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
/// real Gks=0.062;
///#endif
//Parameters for Ik1
/// real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
/// real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
/// real GNa=14.838;
//Parameters for IbNa
/// real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
/// real knak=1.362;
//Parameters for ICaL
/// real GCaL=0.000175;
//Parameters for IbCa
/// real GbCa=0.000592;
//Parameters for INaCa
/// real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
/// real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
/// real GpK=0.0146;
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
7e8388440250065f207a617e713629ca6a270b1b.hip |
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright 2019 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
AffineAccessor<float, 1> ra_x,
AffineAccessor<float, 1> ra_y,
AffineAccessor<float, 1> ra_z)
// const float *x, const float *y, float *z)
{
int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x;
if (p <= bounds.hi)
ra_z[p] += alpha * ra_x[p] + ra_y[p];
}
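// Editor's note (hedged): Realm's Rect bounds are inclusive on both ends, which is why the
// guard above tests p <= bounds.hi rather than p < bounds.hi.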
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get affine accessors for each of our three instances
AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst,
FID_X);
AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst,
FID_Y);
AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst,
FID_Z);
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
hipLaunchKernelGGL(( gpu_saxpy), dim3(total_ctas), dim3(cta_threads), 0, 0, saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
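// --- Editor's addition (hedged, not part of the original example) ---
// The task above deliberately returns without waiting for the kernel ("NO WAIT").
// If a blocking variant were ever wanted, a minimal (assumed) helper would simply
// synchronize the device before returning:
__host__ static void gpu_saxpy_wait(void)
{
  hipError_t err = hipDeviceSynchronize();
  if (err != hipSuccess)
    printf("gpu_saxpy_wait: hipDeviceSynchronize failed (%d)\n", (int)err);
}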
| 7e8388440250065f207a617e713629ca6a270b1b.cu |
/* Copyright 2019 Stanford University, NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "realm_saxpy.h"
__global__
void gpu_saxpy(const float alpha,
//const int num_elements,
Rect<1> bounds,
AffineAccessor<float, 1> ra_x,
AffineAccessor<float, 1> ra_y,
AffineAccessor<float, 1> ra_z)
// const float *x, const float *y, float *z)
{
int p = bounds.lo + (blockIdx.x * blockDim.x) + threadIdx.x;
if (p <= bounds.hi)
ra_z[p] += alpha * ra_x[p] + ra_y[p];
}
__host__
void gpu_saxpy_task(const void *args, size_t arglen,
const void *userdata, size_t userlen, Processor p)
{
assert(arglen == sizeof(SaxpyArgs));
const SaxpyArgs *saxpy_args = (const SaxpyArgs*)args;
printf("Running GPU Saxpy Task\n\n");
// get affine accessors for each of our three instances
AffineAccessor<float, 1> ra_x = AffineAccessor<float, 1>(saxpy_args->x_inst,
FID_X);
AffineAccessor<float, 1> ra_y = AffineAccessor<float, 1>(saxpy_args->y_inst,
FID_Y);
AffineAccessor<float, 1> ra_z = AffineAccessor<float, 1>(saxpy_args->z_inst,
FID_Z);
size_t num_elements = saxpy_args->bounds.volume();
size_t cta_threads = 256;
size_t total_ctas = (num_elements + (cta_threads-1))/cta_threads;
gpu_saxpy<<<total_ctas, cta_threads>>>(saxpy_args->alpha, saxpy_args->bounds,
ra_x, ra_y, ra_z);
// LOOK: NO WAIT! :)
}
|
e144edde04b2f7b49c431ad546c693ef1619862d.hip |
#include <stdbool.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "transpose_smem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
hipMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
hipMalloc(&out, XSIZE*YSIZE);
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
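// Editor's note: the two while loops above simply round iXSIZE and iYSIZE up to the next
// multiple of the block dimensions, i.e. iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX,
// so the grid computed below covers the whole (padded) matrix.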
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((transpose_smem), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((transpose_smem), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((transpose_smem), dim3(gridBlock), dim3(threadBlock), 0, 0, in, out, nx, ny);
}
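// Editor's note (hedged): kernel launches are asynchronous, so without a device or stream
// synchronization at this point the steady_clock stamp below mostly measures launch and
// queueing overhead rather than kernel execution time.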
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
| e144edde04b2f7b49c431ad546c693ef1619862d.cu |
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "transpose_smem.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *in = NULL;
cudaMalloc(&in, XSIZE*YSIZE);
int *out = NULL;
cudaMalloc(&out, XSIZE*YSIZE);
int nx = 1;
int ny = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
transpose_smem<<<gridBlock,threadBlock>>>(in,out,nx,ny);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
transpose_smem<<<gridBlock,threadBlock>>>(in,out,nx,ny);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
transpose_smem<<<gridBlock,threadBlock>>>(in,out,nx,ny);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5e1caf8be751f31d16b214b615fc7b99f710716e.hip |
// !!! This is a file automatically generated by hipify!!!
/*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hipblas.h> // hipBLAS API (hipblasHandle_t, hipblasCreate/hipblasDestroy, HIPBLAS_OP_*) is used below; rocblas.h does not declare these
#include <cusolverDn.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
#include "timer.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/advance.h>
#include <cmath>
#include <limits>
#include <thrust/fill.h>
#include "../include/cuda_utils.h"
namespace h2o4gpu {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data; // pointer to data on CPU
hipblasHandle_t handle; // handle for data on GPU
GpuData(const T *orig_data) : orig_data(orig_data) {
hipblasCreate(&handle);
// fprintf(stderr,"HEREstart: %ld\n",handle); fflush(stderr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
// fprintf(stderr,"HEREend: %ld\n",handle); fflush(stderr);
if(handle!=NULL) hipblasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
hipblasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? HIPBLAS_OP_N : HIPBLAS_OP_T;
}
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// original MatrixDense where only trainX and no trainY or validX or validY
// Used by elastic_net.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_me=_wDev; // assume thread same as wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense1: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// unlike CPU case, input pointer is always CPU so have to always allocate on GPU when calling this function. So no use of sharedA related to pointer copy like in CPU case.
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: MatrixDense<T>(0, 0, ord, m, n, data){} // assume sharedA=0 and thread=wDev=0 if not given
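// --- Editor's sketch (assumption; caller-side code, not part of this file) ---
// Typical construction from row-major CPU data, copied to GPU 0 without sharing:
// std::vector<float> X(m * n); // filled by the caller
// h2o4gpu::MatrixDense<float> A('r', m, n, X.data());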
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, int datatype, char ord, size_t m, size_t n, T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0),_de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
_me=_wDev; // assume thread=wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense2: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
if(datatype==1){
// input data pointer is already on GPU on this wDev, so just copy pointer
// no info->orig_data, so send 0 to GpuData
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("MDnew",MDnew,1);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
// source pointer is on this GPU
// just copy GPU pointer
_data = data;
if(!this->_done_alloc){
this->_done_alloc = true;
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
else{
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy as going from CPU to GPU
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense3: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));fflush(stderr);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy even if sharedA!=0
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
hipMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
hipMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
hipMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
if(vinfo->orig_data){
hipMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfo->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(vinfoy->orig_data){
hipMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfoy->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(weightinfo->orig_data){
hipMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{// if no weights, set as unity weights
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
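// --- Editor's note (hedged) ---
// When no weight vector is supplied, the constructor above fills _weight with ones via
// thrust::fill on a thrust::device_pointer_cast of the raw GPU pointer; _de is zero-filled the
// same way and is later used by Equil() for the row/column equilibration scalings.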
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: MatrixDense<T>(0,wDev,wDev,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and source thread=wDev if not given
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
// datatype=0: CPU pointer to data
// datatype=1: GPU pointer to data
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"%d\n", ord == 'r');
DEBUG_FPRINTF(stderr,"%d\n", ord == 'c');
DEBUG_FPRINTF(stderr,"ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
DEBUG_FPRINTF(stderr,"MatrixDense4: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, _wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
if(datatype==1){
// source pointer is on GPU already
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
// Just copy GPU pointer
_data = data;
_datay = datay;
_vdata = vdata;
_vdatay = vdatay;
_weight = weight;
if(_datay) _dopredict=0;
else _dopredict=1;
if(_weight==NULL){
DEBUG_FPRINTF(stderr,"datatype=1: making up unity weights: %d %p\n",m,&_weight);
CUDACHECK(hipMalloc(&_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
if(!this->_done_alloc){
this->_done_alloc = true;
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
}
else{
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
hipMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
hipMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
hipMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
hipMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
hipMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
hipMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
hipMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
hipMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
if(weightinfo->orig_data){
hipMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),hipMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"datatype=0: making up unity weights: %d\n",m);
CUDACHECK(hipMalloc(&_weight, this->_m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + this->_m, fill_value);
}
hipMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: MatrixDense<T>(0,wDev,wDev,datatype,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and thread=wDev if not given
// MatrixDense where input actual A object that contains all CPU information, but need to go from 1 GPU to multiple GPU
// Used by elastic_net_ptr.cpp inside openmp loop for each core
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n, A._mvalid), _sharedA(sharedA), _me(me), _wDev(wDev), _data(0),_de(0), _ord(A._ord) {
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"MatrixDense5: ord=%c m=%d n=%d mValid=%d\n",A._ord,A._m,A._n,A._mvalid);
PUSH_RANGE("MDnew",MDnew,2);
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info); // cast from void to GpuData
GpuData<T> *infoy_A = reinterpret_cast<GpuData<T>*>(A._infoy); // cast from void to GpuData
GpuData<T> *vinfo_A = reinterpret_cast<GpuData<T>*>(A._vinfo); // cast from void to GpuData
GpuData<T> *vinfoy_A = reinterpret_cast<GpuData<T>*>(A._vinfoy); // cast from void to GpuData
GpuData<T> *weightinfo_A = reinterpret_cast<GpuData<T>*>(A._weightinfo); // cast from void to GpuData
GpuData<T> *info;
GpuData<T> *infoy;
GpuData<T> *vinfo;
GpuData<T> *vinfoy;
GpuData<T> *weightinfo;
if(info_A->orig_data) info = new GpuData<T>(info_A->orig_data); // create new GpuData structure with point to CPU data
else info = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(infoy_A->orig_data) infoy = new GpuData<T>(infoy_A->orig_data); // create new GpuData structure with point to CPU data
else infoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfo_A->orig_data) vinfo = new GpuData<T>(vinfo_A->orig_data); // create new GpuData structure with point to CPU data
else vinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfoy_A->orig_data) vinfoy = new GpuData<T>(vinfoy_A->orig_data); // create new GpuData structure with point to CPU data
else vinfoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(weightinfo_A->orig_data) weightinfo = new GpuData<T>(weightinfo_A->orig_data); // create new GpuData structure with point to CPU data
else weightinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
this->_info = reinterpret_cast<void*>(info); // back to cast as void
this->_infoy = reinterpret_cast<void*>(infoy); // back to cast as void
this->_vinfo = reinterpret_cast<void*>(vinfo); // back to cast as void
this->_vinfoy = reinterpret_cast<void*>(vinfoy); // back to cast as void
this->_weightinfo = reinterpret_cast<void*>(weightinfo); // back to cast as void
POP_RANGE("MDnew",MDnew,2);
if(!this->_done_alloc){
this->_done_alloc = true;
if(A._wDev == _wDev && A._me == _me && (A._sharedA==0 || _sharedA==0)){ // if on same device and same thread, just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",0);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
// Init();
// this->_done_equil=1;
}
else if(A._wDev == _wDev && A._sharedA!=0 && _sharedA!=0){ // if on same device and sharing memory, then just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",1);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
Init();
this->_done_equil=1;
}
else{
DEBUG_FPRINTF(stderr,"ATYPE%d\n",2);
// Copy Matrix to from source GPU to this GPU
PUSH_RANGE("MDcopy",MDcopy,1);
//GpuData<T> *info = reinterpret_cast<GpuData<T>*>(_info); // cast void -> GpuData
double t0 = timer<double>();
if(A._data) hipMalloc(&_data, A._m * A._n * sizeof(T)); // allocate on GPU
if(A._datay) hipMalloc(&_datay, A._m * sizeof(T)); // allocate on GPU
if(A._vdata) hipMalloc(&_vdata, A._mvalid * A._n * sizeof(T)); // allocate on GPU
if(A._vdatay) hipMalloc(&_vdatay, A._mvalid * sizeof(T)); // allocate on GPU
if(A._weight) hipMalloc(&_weight, A._m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
if(A._data) hipMemcpyPeer(_data, _wDev, A._data, A._wDev, A._m * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._datay){
hipMemcpyPeer(_datay, _wDev, A._datay, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
_dopredict=0;
}
else{
_dopredict=1;
}
if(A._vdata) hipMemcpyPeer(_vdata, _wDev, A._vdata, A._wDev, A._mvalid * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._vdatay) hipMemcpyPeer(_vdatay, _wDev, A._vdatay, A._wDev, A._mvalid * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._weight) hipMemcpyPeer(_weight, _wDev, A._weight, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._de) { hipMalloc(&_de, (A._m + A._n) * sizeof(T)); hipMemcpyPeer(_de, _wDev, A._de, A._wDev, (A._m + A._n) * sizeof(T)); } // braces added so the peer copy only runs when A._de is non-null
if(sharedA>0){
Init();
Equil(1);
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDcopy",MDcopy,1);
}
}
}
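// --- Editor's note (hedged) ---
// When the source matrix lives on a different GPU (the final branch above), the buffers are
// copied with hipMemcpyPeer(dst, dstDevice, src, srcDevice, bytes), which transfers directly
// between devices when peer access is available and otherwise stages through host memory.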
template <typename T>
MatrixDense<T>::MatrixDense(int me, int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(0, me, wDev, A){} // then assume not sharing memory
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(wDev, wDev, A){} // then assume thread=wDev for the new matrix (i.e. not input A)
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: MatrixDense<T>(A._wDev, A){} // then assume same device as input A
template <typename T>
MatrixDense<T>::~MatrixDense() {
// return;//TODO: Some destructor issue FIXME. Segfaults after adding weights. Can't find issue.
checkwDev(_wDev);
CUDACHECK(hipSetDevice(_wDev));
if(0){
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
GpuData<T> *infoy = reinterpret_cast<GpuData<T>*>(this->_infoy);
GpuData<T> *vinfo = reinterpret_cast<GpuData<T>*>(this->_vinfo);
GpuData<T> *vinfoy = reinterpret_cast<GpuData<T>*>(this->_vinfoy);
GpuData<T> *weightinfo = reinterpret_cast<GpuData<T>*>(this->_weightinfo);
if(info) delete info; this->_info = 0;
if(infoy) delete infoy; this->_infoy = 0;
if(vinfo) delete vinfo; this->_vinfo = 0;
if(vinfoy) delete vinfoy; this->_vinfoy = 0;
if(weightinfo) delete weightinfo; this->_weightinfo = 0;
}
// fprintf(stderr,"HERE1\n"); fflush(stderr);
if(0){ // Note that this frees these pointers as soon as the MatrixDense object goes out of scope, and one might want more fine-grained control over GPU memory if inside (say) a high-level python API.
// If 0 is used, then the user needs to call a finish() or similar to free memory. If 0, this also allows the user to call (say) fit() or fitptr() multiple times.
if (this->_done_init && _data) {
// fprintf(stderr,"Freeing _data: %p\n",(void*)_data); fflush(stderr);
hipFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE2\n"); fflush(stderr);
if (this->_done_init && _datay) {
// fprintf(stderr,"Freeing _datay: %p\n",(void*)_datay); fflush(stderr);
hipFree(_datay);
this->_datay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE3\n"); fflush(stderr);
if (this->_done_init && _vdata) {
// fprintf(stderr,"Freeing _vdata: %p\n",(void*)_vdata); fflush(stderr);
hipFree(_vdata);
this->_vdata = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE4\n"); fflush(stderr);
if (this->_done_init && _vdatay) {
// fprintf(stderr,"Freeing _vdatay: %p\n",(void*)_vdatay); fflush(stderr);
hipFree(_vdatay);
this->_vdatay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE5\n"); fflush(stderr);
if (this->_done_init && _weight) {
// fprintf(stderr,"Freeing _weight: %p\n",(void*)_weight); fflush(stderr);
hipFree(_weight);
this->_weight = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE6\n"); fflush(stderr);
    if(this->_done_init && _de && !_sharedA){ // JONTODO: when sharedA=1, only free on the source thread (sourceme) and source device (sourcewDev); could store the source thread with the data and free only when running on it
      //      fprintf(stderr,"Freeing _de: %p\n",(void*)_de); fflush(stderr);
hipFree(_de);
this->_de=0;
DEBUG_CUDA_CHECK_ERR();
}
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(hipSetDevice(_wDev));
PUSH_RANGE("MDinit",MDinit,1);
POP_RANGE("MDinit",MDinit,1);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
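// Accessors: copy training/validation data and weights into caller-provided buffers.
// datatype==1 copies from device memory to the host buffer with hipMemcpy; otherwise
// the stored pointer is assumed host-accessible and copied with std::memcpy.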
template <typename T>
int MatrixDense<T>::GetTrainX(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_data){
if(datatype==1){
hipMemcpy(*data, _data, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _data, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetTrainY(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_datay){
if(datatype==1){
hipMemcpy(*data, _datay, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _datay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidX(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_vdata){
if(datatype==1){
hipMemcpy(*data, _vdata, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdata, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidY(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_vdatay){
if(datatype==1){
hipMemcpy(*data, _vdatay, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdatay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetWeight(int datatype, size_t size, T**data) const {
CUDACHECK(hipSetDevice(_wDev));
if(_weight){
if(datatype==1){
hipMemcpy(*data, _weight, size* sizeof(T),hipMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _weight, size * sizeof(T));
}
return(0);
}
else return(1);
}
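// Mul / Mulvalid: y := alpha*op(A)*x + beta*y via blas gemv on the training (_data)
// or validation (_vdata) matrix, honoring the stored row/column-major order.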
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(hipSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _data is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Mulvalid(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(hipSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_mvalid);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _vdata is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
// col-major order (fortran) A, but still print as row major
template <typename T>
void printMatrix(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// row-major order (c) A printed as row major
template <typename T>
void printMatrix2(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[col + row*n];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include svd_example.cpp
* g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver
*
*/
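// Precision-dispatch wrappers: let the templated SVD/check code below call the matching
// float (S*) or double (D*) cusolver/cublas routine through a single overloaded name.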
inline cusolverStatus_t cusolverDngesvd ( hipsolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *work, int lwork, float *rwork, int *devInfo){
return(hipsolverDnSgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cusolverStatus_t cusolverDngesvd ( hipsolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *work, int lwork, double *rwork, int *devInfo){
return(hipsolverDnDgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline hipblasStatus_t cublasgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc){
return(hipblasSgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline hipblasStatus_t cublasgemm(hipblasHandle_t handle, hipblasOperation_t transa, hipblasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc){
return(hipblasDgemm(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline hipblasStatus_t cublasdgmm(hipblasHandle_t handle,
hipblasSideMode_t mode,
int m,
int n,
const float *A,
int lda,
const float *x,
int incx,
float *C,
int ldc){
return(hipblasSdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline hipblasStatus_t cublasdgmm(hipblasHandle_t handle,
hipblasSideMode_t mode,
int m,
int n,
const double *A,
int lda,
const double *x,
int incx,
double *C,
int ldc){
return(hipblasDdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline hipblasStatus_t cublasnrm2(hipblasHandle_t handle,
int n,
const double *x,
int incx,
double *result){
return(hipblasDnrm2(handle,
n,
x,
incx,
result));
}
inline hipblasStatus_t cublasnrm2(hipblasHandle_t handle,
int n,
const float *x,
int incx,
float *result){
return(hipblasSnrm2(handle,
n,
x,
incx,
result));
}
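// The transposeNaive/transposeCoalesced kernels below are kept only as commented-out
// reference; cudaintranspose() further down performs the transpose with the blas geam routine.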
// // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// __global__ void transposeNaive(float *odata, float* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeNaive(double *odata, double* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeCoalesced(float *odata,
// float *idata, int width, int height)
// {
// __shared__ float tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// __global__ void transposeCoalesced(double *odata,
// double *idata, int width, int height)
// {
// __shared__ double tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// transpose a row-major device matrix A[m][n] into column-major storage in odata
// (not truly in place: odata must be a separate device buffer from idata)
void cudaintranspose(float *odata, float *idata, int m, int n){
hipError_t cudaStat1 = hipSuccess;
cudaStat1 = hipMemcpy(odata, idata, sizeof(float)*m*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
float const alpha(1.0);
float const beta(0.0);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasSgeam( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
hipblasDestroy(handle);
}
void cudaintranspose(double *odata, double *idata, int m, int n){
hipError_t cudaStat1 = hipSuccess;
cudaStat1 = hipMemcpy(odata, idata, sizeof(double)*m*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
double const alpha(1.0);
double const beta(0.0);
hipblasHandle_t handle;
hipblasCreate(&handle);
hipblasDgeam( handle, HIPBLAS_OP_T, HIPBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
hipblasDestroy(handle);
}
#define MIN(a,b) ((a)<(b) ? (a) : (b))
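// svd1: full SVD of the training matrix via the gesvd wrappers above. A row-major
// input is first transposed into column-major storage (gesvd expects Fortran order),
// then the reconstruction error |A - U*S*VT| is formed with dgmm/gemm/nrm2 as a
// sanity check. Note that d_U, d_S and d_VT are freed before returning, so this
// routine currently only reports the check and does not keep the factors.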
template <typename T>
int MatrixDense<T>::svd1(void) {
fprintf(stderr,"begin svd inside0\n"); fflush(stderr); fflush(stdout);
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
Init();
fprintf(stderr,"begin svd inside\n"); fflush(stderr); fflush(stdout);
hipsolverDnHandle_t cusolverH = NULL;
hipblasHandle_t cublasH = NULL;
hipblasStatus_t cublas_status = HIPBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
hipError_t cudaStat1 = hipSuccess;
hipError_t cudaStat2 = hipSuccess;
hipError_t cudaStat3 = hipSuccess;
hipError_t cudaStat4 = hipSuccess;
hipError_t cudaStat5 = hipSuccess;
hipError_t cudaStat6 = hipSuccess;
int m = this->_m;
int n = this->_n;
// const int m = this->_m;
// const int n = this->_n;
int lda = m;
  /*       | 1 2 |
   *   A = | 4 5 |
   *       | 2 1 |
   */
  unsigned char ord='r'; // TODO: should be passed in as an argument
// original device vector
T *d_A0;
d_A0 = this->_data;
// device vectors
T *d_A = NULL;
T *d_S = NULL;
T *d_U = NULL;
T *d_VT = NULL;
int *devInfo = NULL;
T *d_work = NULL;
T *d_rwork = NULL;
T *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
const T h_one = 1;
const T h_minus_one = -1;
double t0 = timer<double>();
// step 1: create cusolverDn/cublas handle
cusolver_status = hipsolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
cublas_status = hipblasCreate(&cublasH);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
fprintf(stderr,"HERE1\n"); fflush(stderr); fflush(stdout);
// step 2: copy A to device
// cudaStat1 = hipMalloc ((void**)&d_A , sizeof(T)*lda*n);
// svd destroys d_A, so make copy for testing error // OPTMARK
cudaStat1 = hipMalloc ((void**)&d_A , sizeof(T)*lda*n);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipMemcpy(d_A, d_A0, sizeof(T)*lda*n, hipMemcpyDeviceToDevice);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
int ldu=m; //lda;
int ldureal=n; // actual storage
int ldvt=n;
if(ord=='r'){
// transpose
// execution configuration parameters
//dim3 grid(n/TILE_DIM, lda/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
// transposeCoalesced<<<grid, threads>>>(d_A, d_A0, n, lda);
// transposeNaive<<<grid, threads>>>(d_A, d_A0, n, lda);
cudaintranspose(d_A,d_A0,m,n); // OPTMARK
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
    // debug only: swap the dimensions so printMatrix2 displays the transposed layout; do not actually swap them for real use.
if(0){
int temp=m;
m=n;
n=temp;
lda=m;
ldu=m; //lda;
ldureal=n; // actual storage
ldvt=n;
}
}
else{
d_A = d_A0;
}
fprintf(stderr,"HERE PRE\n"); fflush(stderr); fflush(stdout);
// old host side vectors
// T A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
// GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(this->_info); // cast from void to GpuData
// T *A = const_cast<T*>(info_A->orig_data);
#if(0)
T A[lda*n]; // for debug
T U[ldureal*m]; // m-by-m unitary matrix
T VT[ldvt*n]; // n-by-n unitary matrix
T S[MIN(n,m)]; // singular value
#endif
// T S_exact[n] = {7.065283497082729, 1.040081297712078};
fprintf(stderr,"HERE POST\n"); fflush(stderr); fflush(stdout);
// now d_A has column-major order matrix
fprintf(stderr,"HERE2\n"); fflush(stderr); fflush(stdout);
#if(0) // debug
cudaStat1 = hipMemcpy(A, d_A, sizeof(T)*lda*n, hipMemcpyDeviceToHost);
assert(hipSuccess == cudaStat1);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
printf("A = (matlab base-1)\n");
printMatrix(m, n, A, lda, "A");
printf("=====\n");
printf("A = (matlab base-1)\n");
printMatrix2(m, n, A, lda, "A");
printf("=====\n");
#endif
fprintf(stderr,"HERE3\n"); fflush(stderr); fflush(stdout);
cudaStat2 = hipMalloc ((void**)&d_S , sizeof(T)*MIN(n,m));
cudaStat3 = hipMalloc ((void**)&d_U , sizeof(T)*ldureal*m);
cudaStat4 = hipMalloc ((void**)&d_VT , sizeof(T)*ldvt*n);
cudaStat5 = hipMalloc ((void**)&devInfo, sizeof(int));
cudaStat6 = hipMalloc ((void**)&d_W , sizeof(T)*lda*n);
// assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
assert(hipSuccess == cudaStat5);
assert(hipSuccess == cudaStat6);
// host->device
// cudaStat1 = hipMemcpy(d_A, A, sizeof(T)*lda*n, hipMemcpyHostToDevice);
// assert(hipSuccess == cudaStat1);
// step 3: query working space of SVD
//The dense matrices are assumed to be stored in column-major order in memory.
cusolver_status = hipsolverDnDgesvd_bufferSize(
cusolverH,
m,
n,
&lwork );
assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
cudaStat1 = hipMalloc((void**)&d_work , sizeof(T)*lwork);
assert(hipSuccess == cudaStat1);
double t1 = timer<double>();
fprintf(stderr,"SVD init: %g\n",t1-t0); fflush(stderr); fflush(stdout);
// step 4: compute SVD
double t0c = timer<double>();
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = cusolverDngesvd(
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
ldu,
d_VT,
ldvt,
d_work,
lwork,
d_rwork,
devInfo);
cudaStat4 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost);
printf("after gesvd: info_gpu = %d\n", info_gpu); fflush(stdout);
assert(0 == info_gpu);
printf("=====\n"); fflush(stdout);
cudaStat1 = hipDeviceSynchronize();
assert(hipSuccess == cudaStat1);
fprintf(stderr,"BAD: %d\n",cusolver_status); fflush(stderr);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
double t1c = timer<double>();
fprintf(stderr,"SVD compute: %g\n",t1-t0); fflush(stderr); fflush(stdout);
#if(0)
/////////////////////////
// Copy solution device->host
double t0h = timer<double>();
cudaStat1 = hipMemcpy(U , d_U , sizeof(T)*ldureal*m, hipMemcpyDeviceToHost);
cudaStat2 = hipMemcpy(VT, d_VT, sizeof(T)*ldvt*n, hipMemcpyDeviceToHost);
cudaStat3 = hipMemcpy(S , d_S , sizeof(T)*MIN(n,m), hipMemcpyDeviceToHost);
assert(hipSuccess == cudaStat1);
assert(hipSuccess == cudaStat2);
assert(hipSuccess == cudaStat3);
assert(hipSuccess == cudaStat4);
if(0){ // debug
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, ldureal, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, ldvt, "VT");
printf("=====\n");
/////////////////////////
// measure error of singular value
// T ds_sup = 0;
// for(int j = 0; j < n; j++){
// T err = fabs( S[j] - S_exact[j] );
// ds_sup = (ds_sup > err)? ds_sup : err;
// }
// printf("|S - S_exact| = %E \n", ds_sup);
}
double t1h = timer<double>();
fprintf(stderr,"SVD back to host: %g\n",t1h-t0h); fflush(stderr); fflush(stdout);
#endif
/////////////////////////
// now check
double t0c1 = timer<double>();
// step 5: |A - U*S*VT|
// W = S*VT
cublas_status = cublasdgmm(
cublasH,
HIPBLAS_SIDE_LEFT,
n,
n,
d_VT,
ldvt,
d_S,
1,
d_W,
lda);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
double t1c1 = timer<double>();
fprintf(stderr,"SVD check1: %g\n",t1c1-t0c1); fflush(stderr); fflush(stdout);
// A := -U*W + A
double t0c2 = timer<double>();
cudaStat1 = hipMemcpy(d_A, d_A0, sizeof(T)*lda*n, hipMemcpyDeviceToDevice); // copy because original d_A was destroyed
assert(hipSuccess == cudaStat1);
cublas_status = cublasgemm(
cublasH,
HIPBLAS_OP_N, // U
HIPBLAS_OP_N, // W
m, // number of rows of A
n, // number of columns of A
n, // number of columns of U
&h_minus_one, /* host pointer */
d_U, // U
ldu,
d_W, // W
lda,
                           &h_one, /* host pointer */
d_A,
lda);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
double t1c2 = timer<double>();
fprintf(stderr,"SVD check2: %g\n",t1c2-t0c2); fflush(stderr); fflush(stdout);
double t0c3 = timer<double>();
T dR_fro = 0.0;
cublas_status = cublasnrm2(
cublasH, lda*n, d_A, 1, &dR_fro);
assert(HIPBLAS_STATUS_SUCCESS == cublas_status);
printf("|A - U*S*VT| = %E \n", dR_fro); fflush(stdout);
double t1c3 = timer<double>();
fprintf(stderr,"SVD check3: %g\n",t1c3-t0c3); fflush(stderr); fflush(stdout);
// free resources
double t0f = timer<double>();
//if (d_A ) hipFree(d_A);
if (d_S ) hipFree(d_S);
if (d_U ) hipFree(d_U);
if (d_VT ) hipFree(d_VT);
if (devInfo) hipFree(devInfo);
if (d_work ) hipFree(d_work);
if (d_rwork) hipFree(d_rwork);
if (d_W ) hipFree(d_W);
if (cublasH ) hipblasDestroy(cublasH);
if (cusolverH) hipsolverDnDestroy(cusolverH);
// hipDeviceReset();
double t1f = timer<double>();
fprintf(stderr,"SVD free: %g\n",t1f-t0f); fflush(stderr); fflush(stdout);
fprintf(stderr,"end svd inside\n"); fflush(stderr); fflush(stdout);
return 0;
}
// Equilibration (precondition) matrix using Sinkhorn Knopp method wrapped to allow any norm
// See https://arxiv.org/pdf/1610.03871.pdf for more information
template <typename T>
int MatrixDense<T>::Equil(bool equillocal) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
if (this->_done_equil) return 0;
else this->_done_equil=1;
CUDACHECK(hipSetDevice(_wDev));
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
T *d = _de;
T *e = d + this->_m;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
hipMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if(equillocal){
// Fill sign bits, assigning each thread a multiple of 8 elements.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
hipLaunchKernelGGL(( __SetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Perform Sinkhorn-Knopp equilibration to obtain a doubly stochastic matrix.
SinkhornKnopp(this, d, e, equillocal);
wrapcudaDeviceSynchronize();
if(equillocal){
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSign), dim3(grid_size), dim3(cml::kBlockSize), 0, 0, _data, sign, num_chars,
IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
hipLaunchKernelGGL(( __UnSetSignSingle), dim3(1), dim3(1), 0, 0, _data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
wrapcudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
wrapcudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
wrapcudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
hipFree(sign);
CUDA_CHECK_ERR();
return 0;
}
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
template <typename T>
struct summary_stats_data
{
T n;
T min;
T max;
T mean;
T M2;
T M3;
T M4;
// initialize to the identity element
void initialize()
{
n = mean = M2 = M3 = M4 = 0;
min = std::numeric_limits<T>::max();
    max = std::numeric_limits<T>::lowest(); // lowest(), not min(): for floating-point T, min() is the smallest positive value and would break all-negative data
}
T variance() { return M2 / (n - 1); }
T variance_n() { return M2 / n; }
T skewness() { return std::sqrt(n) * M3 / ::pow(M2, (T) 1.5); }
T kurtosis() { return n * M4 / (M2 * M2); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a summary_stats_data whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op
{
__host__ __device__
summary_stats_data<T> operator()(const T& x) const
{
summary_stats_data<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
result.M3 = 0;
result.M4 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two summary_stats_data
// structs and returns a new summary_stats_data which is an
// approximation to the summary statistics for
// all values that have been aggregated so far
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const summary_stats_data<T>&,
const summary_stats_data<T>&,
summary_stats_data<T> >
{
__host__ __device__
summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const
{
summary_stats_data<T> result;
// precompute some common subexpressions
T n = x.n + y.n;
T n2 = n * n;
T n3 = n2 * n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
T delta3 = delta2 * delta;
T delta4 = delta3 * delta;
//Basic number of samples (n), min, and max
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
result.M3 = x.M3 + y.M3;
result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2;
result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n;
result.M4 = x.M4 + y.M4;
result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3;
result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2;
result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n;
return result;
}
};
template <typename Iterator>
void print_range(const std::string& name, Iterator first, Iterator last)
{
typedef typename std::iterator_traits<Iterator>::value_type T;
std::cout << name << ": ";
thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " "));
std::cout << "\n";
}
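// absolute_value: unary functor returning |x|; used by the thrust::transform_reduce in
// Stats() to take the maximum absolute entry of A^T applied to the weighted, centered response.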
template<typename T>
struct absolute_value : public thrust::unary_function<T,T>
{
__host__ __device__ T operator()(const T &x) const
{
return x < T(0) ? -x : x;
}
};
// --- Operator for testing nan values
template<typename T>
struct isnan_test {
__host__ __device__ bool operator()(const T a) const {
return isnan(a) || isinf(a);
}
};
// check properties of input data
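// Stats() validates every supplied array for nan/inf, computes summary statistics of the
// training and validation responses, and estimates lambda_max as
// max_j |[A^T (w .* (y - intercept*mean(y)))]_j| over the non-intercept columns.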
template <typename T>
int MatrixDense<T>::Stats(int intercept, T *min, T *max, T *mean, T *var, T *sd, T *skew, T *kurt, T &lambda_max0)
{
CUDACHECK(hipSetDevice(_wDev));
if(_data!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_data);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_data+this->_m*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data matrix (trainX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_datay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_datay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_datay+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data training predictions/labels (trainY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdata!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdata);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdata+this->_mvalid*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data matrix (validX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdatay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdatay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdatay+this->_mvalid);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data training predictions/labels (validY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_weight!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_weight);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_weight+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Weight Training Data has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
// nothing else to do if _datay==NULL
if(_datay==NULL) return(0);
// setup arguments
summary_stats_unary_op<T> unary_op;
summary_stats_binary_op<T> binary_op;
summary_stats_data<T> init;
init.initialize();
int len=0;
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> dataybegin=thrust::device_pointer_cast(_datay);
len=this->_m;
thrust::device_ptr<T> datayend=thrust::device_pointer_cast(_datay+len);
// compute summary statistics
summary_stats_data<T> resulty = thrust::transform_reduce(dataybegin, datayend, unary_op, init, binary_op);
min[0]=resulty.min;
max[0]=resulty.max;
mean[0]=resulty.mean;
var[0]=resulty.variance();
sd[0]=std::sqrt(resulty.variance_n());
skew[0]=resulty.skewness();
kurt[0]=resulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Train*****"<<std::endl;
// print_range("The data", dataybegin, datayend);
std::cout <<"Count : "<< resulty.n << std::endl;
std::cout <<"Minimum : "<< min[0]<<std::endl;
std::cout <<"Maximum : "<< max[0]<<std::endl;
std::cout <<"Mean : "<< mean[0]<< std::endl;
std::cout <<"Variance : "<< var[0]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[0]<< std::endl;
std::cout <<"Skewness : "<< skew[0]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[0]<< std::endl;
#endif
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> vdataybegin=thrust::device_pointer_cast(_vdatay);
len=this->_mvalid;
thrust::device_ptr<T> vdatayend=thrust::device_pointer_cast(_vdatay+len);
// compute summary statistics
summary_stats_data<T> vresulty = thrust::transform_reduce(vdataybegin, vdatayend, unary_op, init, binary_op);
min[1]=vresulty.min;
max[1]=vresulty.max;
mean[1]=vresulty.mean;
var[1]=vresulty.variance();
sd[1]=std::sqrt(vresulty.variance_n());
skew[1]=vresulty.skewness();
kurt[1]=vresulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Valid*****"<<std::endl;
// print_range("The data", vdataybegin, vdatayend);
std::cout <<"Count : "<< vresulty.n << std::endl;
std::cout <<"Minimum : "<< min[1]<<std::endl;
std::cout <<"Maximum : "<< max[1]<<std::endl;
std::cout <<"Mean : "<< mean[1]<< std::endl;
std::cout <<"Variance : "<< var[1]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[1]<< std::endl;
std::cout <<"Skewness : "<< skew[1]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[1]<< std::endl;
#endif
if(1){ // normal usage
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
hipblasHandle_t hdl = info->handle;
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(_datay, this->_m); // b
cml::vector<T> weight_vec;
if(_weight) weight_vec = cml::vector_view_array(_weight, this->_m); // weight
else{
weight_vec = cml::vector_calloc<T>(this->_m); // weight make up
cml::vector_add_constant(&weight_vec, static_cast<T>(1.0)); // make unity weights
}
cml::vector<T> ytemp = cml::vector_calloc<T>(this->_m); // b
cml::vector<T> xtemp = cml::vector_calloc<T>(this->_n); // x
cml::vector_memcpy(&ytemp, &y_vec); // y_vec->ytemp
cml::vector_add_constant(&ytemp, -static_cast<T>(intercept)*mean[0]); // ytemp -> ytemp - intercept*mean[0]
cml::vector_mul(&ytemp,&weight_vec); // ytemp*weight -> ytemp
// Compute A^T . b
if (_ord == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
else{
const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, HIPBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(&xtemp.data[0]);
lambda_max0 = thrust::transform_reduce(thrust::device,
dev_ptr, dev_ptr + this->_n-intercept,
absolute_value<T>(),
static_cast<T>(0.0),
thrust::maximum<T>());
}
else{
lambda_max0 = 7000; // test
}
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(hipblasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
      return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
// columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultRow), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
hipLaunchKernelGGL(( __MultCol), dim3(grid_dim_row), dim3(cml::kBlockSize), 0, 0, m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class MatrixDense<float>;
#endif
// upload data function. Uploads to a single GPU.
// mimics otherwise similar MatrixDense constructor, but has no destruction of uploaded data pointers
template <typename T>
int makePtr_dense(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight, T **_data, T **_datay, T **_vdata, T **_vdatay, T **_weight){
checkwDev(wDev);
CUDACHECK(hipSetDevice(wDev));
DEBUG_FPRINTF(stderr,"makePtr_dense: %d\n",0);
#ifdef DEBUG
// CUDACHECK(hipSetDeviceFlags(hipDeviceMapHost)); // TODO: MapHostMemory
hipDeviceProp_t props;
CUDACHECK(hipGetDeviceProperties(&props, wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
// Copy Matrix to GPU (unlike CPU case, cannot copy just pointer because always assume input is CPU and output is GPU)
double t0 = timer<double>();
PUSH_RANGE("MDsendsource",MDsendsource,1);
if(data){
CUDACHECK(hipMalloc(_data, m * n * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_data, data, m * n * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_data: %p\n",(void*)*_data); fflush(stderr);
}
else *_data=NULL;
if(datay){
CUDACHECK(hipMalloc(_datay, m * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_datay, datay, m * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_datay: %p\n",(void*)*_datay); fflush(stderr);
}
else *_datay=NULL;
if(vdata){
CUDACHECK(hipMalloc(_vdata, mValid * n * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_vdata, vdata, mValid * n * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdata: %p\n",(void*)*_vdata); fflush(stderr);
}
else *_vdata=NULL;
if(vdatay){
CUDACHECK(hipMalloc(_vdatay, mValid * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_vdatay, vdatay, mValid * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdatay: %p\n",(void*)*_vdatay); fflush(stderr);
}
else *_vdatay=NULL;
// fprintf(stderr,"weight=%p\n",weight); fflush(stderr);
if(weight){
CUDACHECK(hipMalloc(_weight, m * sizeof(T))); // allocate on GPU
CUDACHECK(hipMemcpy(*_weight, weight, m * sizeof(T),hipMemcpyHostToDevice)); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"making up unity weights: %d\n",m);
CUDACHECK(hipMalloc(_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(*_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
// fprintf(stderr,"_weight: %p\n",(void*)*_weight); fflush(stderr);
}
POP_RANGE("MDsendsource",MDsendsource,1);
double t2 = timer<double>();
DEBUG_FPRINTF(stdout,"Time to allocate and copy the data matrix on the GPU: %f\n", t2-t0);
hipDeviceSynchronize();
DEBUG_FPRINTF(stderr,"pointer data %p\n",(void*)*_data);
DEBUG_FPRINTF(stderr,"pointer datay %p\n",(void*)*_datay);
DEBUG_FPRINTF(stderr,"pointer vdata %p\n",(void*)*_vdata);
DEBUG_FPRINTF(stderr,"pointer vdaty %p\n",(void*)*_vdatay);
DEBUG_FPRINTF(stderr,"pointer weight %p\n",(void*)*_weight);
return(0);
}
template int makePtr_dense<double>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const double *data, const double *datay, const double *vdata, const double *vdatay, const double *weight,
double **_data, double **_datay, double **_vdata, double **_vdatay, double **_weight);
template int makePtr_dense<float>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const float *data, const float *datay, const float *vdata, const float *vdatay, const float *weight,
float **_data, float **_datay, float **_vdata, float **_vdatay, float **_weight);
template <typename T>
int modelFree1(T *aptr){
if(aptr!=NULL){
// for now, freed during ~
//hipFree(aptr);
//CUDA_CHECK_ERR();
}
return(0);
}
template int modelFree1<float>(float *aptr);
template int modelFree1<double>(double *aptr);
} // namespace h2o4gpu
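// Non-templated entry points (presumably what the language bindings call); they simply
// forward to the templated h2o4gpu helpers above.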
int modelfree1_double(double *aptr){
return h2o4gpu::modelFree1<double>(aptr);
}
int modelfree1_float(float *aptr){
return h2o4gpu::modelFree1<float>(aptr);
}
int make_ptr_double(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const double* trainX, const double* trainY, const double* validX, const double* validY, const double *weight,
double**a, double**b, double**c, double**d, double **e) {
return h2o4gpu::makePtr_dense<double>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight,
float**a, float**b, float**c, float**d, float **e) {
return h2o4gpu::makePtr_dense<float>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
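// Usage sketch (hypothetical caller; variable names and sizes are illustrative only),
// assuming row-major ('r') host arrays for the training/validation data:
//   double *dA, *dY, *dVX, *dVY, *dW;
//   make_ptr_double(0 /*sharedA*/, 0 /*sourceme*/, 0 /*sourceDev*/,
//                   mTrain, n, mValid, 'r',
//                   trainX, trainY, validX, validY, NULL /*NULL weight => unit weights*/,
//                   &dA, &dY, &dVX, &dVY, &dW);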
| 5e1caf8be751f31d16b214b615fc7b99f710716e.cu | /*!
* Modifications Copyright 2017 H2O.ai, Inc.
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include <cusolverDn.h>
#include "cml/cml_blas.cuh"
#include "cml/cml_matrix.cuh"
#include "cml/cml_vector.cuh"
#include "equil_helper.cuh"
#include "matrix/matrix.h"
#include "matrix/matrix_dense.h"
#include "util.h"
#include "timer.h"
#include <thrust/device_ptr.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/transform_reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include <thrust/extrema.h>
#include <thrust/pair.h>
#include <thrust/advance.h>
#include <cmath>
#include <limits>
#include <thrust/fill.h>
#include "../include/cuda_utils.h"
namespace h2o4gpu {
////////////////////////////////////////////////////////////////////////////////
////////////////////////////// Helper Functions ////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// File scoped constants.
const NormTypes kNormEquilibrate = kNorm2;
const NormTypes kNormNormalize = kNormFro;
template<typename T>
struct GpuData {
const T *orig_data; // pointer to data on CPU
cublasHandle_t handle; // handle for data on GPU
GpuData(const T *orig_data) : orig_data(orig_data) {
cublasCreate(&handle);
// fprintf(stderr,"HEREstart: %ld\n",handle); fflush(stderr);
DEBUG_CUDA_CHECK_ERR();
}
~GpuData() {
// fprintf(stderr,"HEREend: %ld\n",handle); fflush(stderr);
if(handle!=NULL) cublasDestroy(handle);
DEBUG_CUDA_CHECK_ERR();
}
};
cublasOperation_t OpToCublasOp(char trans) {
ASSERT(trans == 'n' || trans == 'N' || trans == 't' || trans == 'T');
return trans == 'n' || trans == 'N' ? CUBLAS_OP_N : CUBLAS_OP_T;
}
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A);
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data);
} // namespace
////////////////////////////////////////////////////////////////////////////////
/////////////////////// MatrixDense Implementation /////////////////////////////
////////////////////////////////////////////////////////////////////////////////
// original MatrixDense where only trainX and no trainY or validX or validY
// Used by elastic_net.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, char ord, size_t m, size_t n, const T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_me=_wDev; // assume thread same as wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense1: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
    // unlike the CPU case, the input pointer is always on the CPU, so we always have to allocate on the GPU in this function; sharedA cannot be used to share the pointer as in the CPU case.
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(char ord, size_t m, size_t n, const T *data)
: MatrixDense<T>(0, 0, ord, m, n, data){} // assume sharedA=0 and thread=wDev=0 if not given
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int wDev, int datatype, char ord, size_t m, size_t n, T *data)
: Matrix<T>(m, n, 0), _sharedA(sharedA), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0),_de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
_me=_wDev; // assume thread=wDev if not given
_datay=NULL;
_vdata=NULL;
_vdatay=NULL;
_weight=NULL;
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense2: ord=%c m=%d n=%d\n",ord,(int)m,(int)n);fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
if(datatype==1){
// input data pointer is already on GPU on this wDev, so just copy pointer
// no info->orig_data, so send 0 to GpuData
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
POP_RANGE("MDnew",MDnew,1);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
// source pointer is on this GPU
// just copy GPU pointer
_data = data;
if(!this->_done_alloc){
this->_done_alloc = true;
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
else{
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_infoy = reinterpret_cast<void*>(infoy);
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfo = reinterpret_cast<void*>(vinfo);
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy as going from CPU to GPU
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(0), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
DEBUG_FPRINTF(stderr,"MatrixDense3: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));fflush(stderr);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
fprintf(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev); fflush(stderr);
#endif
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Unlike CPU case, can't pointer copy even if sharedA!=0
// Copy Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
if(vinfo->orig_data){
cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfo->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(vinfoy->orig_data){
cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
if(this->_mvalid>0){ fprintf(stderr,"vinfoy->orig_data NULL but this->_mvalid>0\n"); fflush(stderr); exit(1); }
}
if(weightinfo->orig_data){
cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{// if no weights, set as unity weights
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, char ord, size_t m, size_t n, size_t mValid, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight)
: MatrixDense<T>(0,wDev,wDev,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and source thread=wDev if not given
// like original MatrixDense, but also feed in CPU data for trainY, validX, and validY
// Used by elastic_net_ptr.cpp to pass CPU data and put on GPU
// datatype=0: CPU pointer to data
// datatype=1: GPU pointer to data
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: Matrix<T>(m, n, mValid), _sharedA(sharedA), _me(me), _wDev(wDev), _datatype(datatype), _dopredict(0), _data(0), _datay(0), _vdata(0), _vdatay(0), _weight(0), _de(0) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"%d\n", ord == 'r');
DEBUG_FPRINTF(stderr,"%d\n", ord == 'c');
DEBUG_FPRINTF(stderr,"ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
DEBUG_FPRINTF(stderr,"MatrixDense4: ord=%c m=%d n=%d mValid=%d\n",ord,(int)m,(int)n,int(mValid));
ASSERT(ord == 'r' || ord == 'R' || ord == 'c' || ord == 'C');
_ord = (ord == 'r' || ord == 'R') ? ROW : COL;
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, _wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
if(datatype==1){
// source pointer is on GPU already
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(0); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
// Just copy GPU pointer
_data = data;
_datay = datay;
_vdata = vdata;
_vdatay = vdatay;
_weight = weight;
if(_datay) _dopredict=0;
else _dopredict=1;
if(_weight==NULL){
DEBUG_FPRINTF(stderr,"datatype=1: making up unity weights: %d %p\n",m,&_weight);
CUDACHECK(cudaMalloc(&_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_weight[0]));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
}
if(!this->_done_alloc){
this->_done_alloc = true;
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
}
}
else{
// source pointer is on CPU
// Set GPU specific _info.
PUSH_RANGE("MDnew",MDnew,1);
GpuData<T> *info = new GpuData<T>(data); // new structure (holds pointer to data and GPU handle)
GpuData<T> *infoy = new GpuData<T>(datay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfo = new GpuData<T>(vdata); // new structure (holds pointer to data and GPU handle)
GpuData<T> *vinfoy = new GpuData<T>(vdatay); // new structure (holds pointer to data and GPU handle)
GpuData<T> *weightinfo = new GpuData<T>(weight); // new structure (holds pointer to data and GPU handle)
this->_info = reinterpret_cast<void*>(info);
this->_infoy = reinterpret_cast<void*>(infoy);
this->_vinfo = reinterpret_cast<void*>(vinfo);
this->_vinfoy = reinterpret_cast<void*>(vinfoy);
this->_weightinfo = reinterpret_cast<void*>(weightinfo);
POP_RANGE("MDnew",MDnew,1);
if(!this->_done_alloc){
this->_done_alloc = true;
// Copy CPU Matrix to GPU.
PUSH_RANGE("MDsend",MDsend,1);
// GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info); // cast void -> GpuData
double t0 = timer<double>();
cudaMalloc(&_data, this->_m * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_datay, this->_m * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdata, this->_mvalid * this->_n * sizeof(T)); // allocate on GPU
cudaMalloc(&_vdatay, this->_mvalid * sizeof(T)); // allocate on GPU
cudaMalloc(&_weight, this->_m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
cudaMemcpy(_data, info->orig_data, this->_m * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(infoy->orig_data){
cudaMemcpy(_datay, infoy->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
_dopredict=0;
}
else{
_dopredict=1;
}
cudaMemcpy(_vdata, vinfo->orig_data, this->_mvalid * this->_n * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
cudaMemcpy(_vdatay, vinfoy->orig_data, this->_mvalid * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
if(weightinfo->orig_data){
cudaMemcpy(_weight, weightinfo->orig_data, this->_m * sizeof(T),cudaMemcpyHostToDevice); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"datatype=0: making up unity weights: %d\n",m);
CUDACHECK(cudaMalloc(&_weight, this->_m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + this->_m, fill_value);
}
cudaMalloc(&_de, (m + n) * sizeof(T));
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(static_cast<T*>(&_de[0]));
T fill_value=0.0;
thrust::fill(dev_ptr, dev_ptr + (m + n), fill_value);
if(sharedA>0){
Init(); // does nothing right now
Equil(1); // JONTODO: Hack for now. Need to pass equil
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDsend",MDsend,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, int datatype, char ord, size_t m, size_t n, size_t mValid, T *data, T *datay, T *vdata, T *vdatay, T *weight)
: MatrixDense<T>(0,wDev,wDev,datatype,ord,m,n,mValid,data,datay,vdata,vdatay,weight){} // assume sharedA=0 and thread=wDev if not given
// MatrixDense constructor that takes an existing A object (holding all CPU information) when going from 1 GPU to multiple GPUs
// Used by elastic_net_ptr.cpp inside openmp loop for each core
template <typename T>
MatrixDense<T>::MatrixDense(int sharedA, int me, int wDev, const MatrixDense<T>& A)
: Matrix<T>(A._m, A._n, A._mvalid), _sharedA(sharedA), _me(me), _wDev(wDev), _data(0),_de(0), _ord(A._ord) {
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
DEBUG_FPRINTF(stderr,"MatrixDense5: ord=%c m=%d n=%d mValid=%d\n",A._ord,A._m,A._n,A._mvalid);
PUSH_RANGE("MDnew",MDnew,2);
GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(A._info); // cast from void to GpuData
GpuData<T> *infoy_A = reinterpret_cast<GpuData<T>*>(A._infoy); // cast from void to GpuData
GpuData<T> *vinfo_A = reinterpret_cast<GpuData<T>*>(A._vinfo); // cast from void to GpuData
GpuData<T> *vinfoy_A = reinterpret_cast<GpuData<T>*>(A._vinfoy); // cast from void to GpuData
GpuData<T> *weightinfo_A = reinterpret_cast<GpuData<T>*>(A._weightinfo); // cast from void to GpuData
GpuData<T> *info;
GpuData<T> *infoy;
GpuData<T> *vinfo;
GpuData<T> *vinfoy;
GpuData<T> *weightinfo;
if(info_A->orig_data) info = new GpuData<T>(info_A->orig_data); // create new GpuData structure with point to CPU data
else info = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(infoy_A->orig_data) infoy = new GpuData<T>(infoy_A->orig_data); // create new GpuData structure with point to CPU data
else infoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfo_A->orig_data) vinfo = new GpuData<T>(vinfo_A->orig_data); // create new GpuData structure with point to CPU data
else vinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(vinfoy_A->orig_data) vinfoy = new GpuData<T>(vinfoy_A->orig_data); // create new GpuData structure with point to CPU data
else vinfoy = new GpuData<T>(0); // create new GpuData structure with point to CPU data
if(weightinfo_A->orig_data) weightinfo = new GpuData<T>(weightinfo_A->orig_data); // create new GpuData structure with point to CPU data
else weightinfo = new GpuData<T>(0); // create new GpuData structure with point to CPU data
this->_info = reinterpret_cast<void*>(info); // back to cast as void
this->_infoy = reinterpret_cast<void*>(infoy); // back to cast as void
this->_vinfo = reinterpret_cast<void*>(vinfo); // back to cast as void
this->_vinfoy = reinterpret_cast<void*>(vinfoy); // back to cast as void
this->_weightinfo = reinterpret_cast<void*>(weightinfo); // back to cast as void
POP_RANGE("MDnew",MDnew,2);
if(!this->_done_alloc){
this->_done_alloc = true;
if(A._wDev == _wDev && A._me == _me && (A._sharedA==0 || _sharedA==0)){ // if on same device and same thread, just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",0);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
// Init();
// this->_done_equil=1;
}
else if(A._wDev == _wDev && A._sharedA!=0 && _sharedA!=0){ // if on same device and sharing memory, then just copy pointer
DEBUG_FPRINTF(stderr,"ATYPE%d\n",1);
_data = A._data;
_datay = A._datay;
_vdata = A._vdata;
_vdatay = A._vdatay;
_weight = A._weight;
_de = A._de;
_dopredict = A._dopredict;
Init();
this->_done_equil=1;
}
else{
DEBUG_FPRINTF(stderr,"ATYPE%d\n",2);
// Copy Matrix to from source GPU to this GPU
PUSH_RANGE("MDcopy",MDcopy,1);
//GpuData<T> *info = reinterpret_cast<GpuData<T>*>(_info); // cast void -> GpuData
double t0 = timer<double>();
if(A._data) cudaMalloc(&_data, A._m * A._n * sizeof(T)); // allocate on GPU
if(A._datay) cudaMalloc(&_datay, A._m * sizeof(T)); // allocate on GPU
if(A._vdata) cudaMalloc(&_vdata, A._mvalid * A._n * sizeof(T)); // allocate on GPU
if(A._vdatay) cudaMalloc(&_vdatay, A._mvalid * sizeof(T)); // allocate on GPU
if(A._weight) cudaMalloc(&_weight, A._m * sizeof(T)); // allocate on GPU
double t1 = timer<double>();
if(A._data) cudaMemcpyPeer(_data, _wDev, A._data, A._wDev, A._m * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._datay){
cudaMemcpyPeer(_datay, _wDev, A._datay, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
_dopredict=0;
}
else{
_dopredict=1;
}
if(A._vdata) cudaMemcpyPeer(_vdata, _wDev, A._vdata, A._wDev, A._mvalid * A._n * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._vdatay) cudaMemcpyPeer(_vdatay, _wDev, A._vdatay, A._wDev, A._mvalid * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
if(A._weight) cudaMemcpyPeer(_weight, _wDev, A._weight, A._wDev, A._m * sizeof(T)); // dest: _data destid: _wDev source: A._data sourceid: A._wDev
      if(A._de){ cudaMalloc(&_de, (A._m + A._n) * sizeof(T)); cudaMemcpyPeer(_de, _wDev, A._de, A._wDev, (A._m + A._n) * sizeof(T)); } // dest: _de destid: _wDev source: A._de sourceid: A._wDev
if(sharedA>0){
Init();
Equil(1);
}
double t2 = timer<double>();
#ifdef DEBUG
printf("Time to allocate the data matrix on the GPU: %f\n", t1-t0);
printf("Time to copy the data matrix to the GPU : %f\n", t2-t1);
#endif
POP_RANGE("MDcopy",MDcopy,1);
}
}
}
template <typename T>
MatrixDense<T>::MatrixDense(int me, int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(0, me, wDev, A){} // then assume not sharing memory
template <typename T>
MatrixDense<T>::MatrixDense(int wDev, const MatrixDense<T>& A)
: MatrixDense<T>(wDev, wDev, A){} // then assume thread=wDev for the new matrix (i.e. not input A)
template <typename T>
MatrixDense<T>::MatrixDense(const MatrixDense<T>& A)
: MatrixDense<T>(A._wDev, A){} // then assume same device as input A
template <typename T>
MatrixDense<T>::~MatrixDense() {
  // return;//TODO: Some destructor issue FIXME. Segfaults after adding weights. Can't find issue.
checkwDev(_wDev);
CUDACHECK(cudaSetDevice(_wDev));
if(0){
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
GpuData<T> *infoy = reinterpret_cast<GpuData<T>*>(this->_infoy);
GpuData<T> *vinfo = reinterpret_cast<GpuData<T>*>(this->_vinfo);
GpuData<T> *vinfoy = reinterpret_cast<GpuData<T>*>(this->_vinfoy);
GpuData<T> *weightinfo = reinterpret_cast<GpuData<T>*>(this->_weightinfo);
if(info) delete info; this->_info = 0;
if(infoy) delete infoy; this->_infoy = 0;
if(vinfo) delete vinfo; this->_vinfo = 0;
if(vinfoy) delete vinfoy; this->_vinfoy = 0;
if(weightinfo) delete weightinfo; this->_weightinfo = 0;
}
// fprintf(stderr,"HERE1\n"); fflush(stderr);
  if(0){ // Note: enabling this frees these pointers as soon as the MatrixDense object goes out of scope; finer-grained control over GPU memory may be wanted inside (say) a high-level python API
    // If left as 0, the user must call a finish() or similar to free memory; leaving it 0 also allows the user to call (say) fit() or fitptr() multiple times
if (this->_done_init && _data) {
// fprintf(stderr,"Freeing _data: %p\n",(void*)_data); fflush(stderr);
cudaFree(_data);
this->_data = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE2\n"); fflush(stderr);
if (this->_done_init && _datay) {
// fprintf(stderr,"Freeing _datay: %p\n",(void*)_datay); fflush(stderr);
cudaFree(_datay);
this->_datay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE3\n"); fflush(stderr);
if (this->_done_init && _vdata) {
// fprintf(stderr,"Freeing _vdata: %p\n",(void*)_vdata); fflush(stderr);
cudaFree(_vdata);
this->_vdata = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE4\n"); fflush(stderr);
if (this->_done_init && _vdatay) {
// fprintf(stderr,"Freeing _vdatay: %p\n",(void*)_vdatay); fflush(stderr);
cudaFree(_vdatay);
this->_vdatay = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE5\n"); fflush(stderr);
if (this->_done_init && _weight) {
// fprintf(stderr,"Freeing _weight: %p\n",(void*)_weight); fflush(stderr);
cudaFree(_weight);
this->_weight = 0;
DEBUG_CUDA_CHECK_ERR();
}
// fprintf(stderr,"HERE6\n"); fflush(stderr);
if(this->_done_init && _de && !_sharedA){ // JONTODO: When sharedA=1, only free on sourceme thread and sourcewDev device (can store sourcethread for-- sourceme -- data and only free if on source thread)
// fprintf(stderr,"Freeing _de: %p\n",(void*)_weight); fflush(stderr);
cudaFree(_de);
this->_de=0;
DEBUG_CUDA_CHECK_ERR();
}
}
}
template <typename T>
int MatrixDense<T>::Init() {
DEBUG_EXPECT(!this->_done_init);
if (this->_done_init)
return 1;
this->_done_init = true;
CUDACHECK(cudaSetDevice(_wDev));
PUSH_RANGE("MDinit",MDinit,1);
POP_RANGE("MDinit",MDinit,1);
DEBUG_CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::GetTrainX(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_data){
if(datatype==1){
cudaMemcpy(*data, _data, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _data, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetTrainY(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_datay){
if(datatype==1){
cudaMemcpy(*data, _datay, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _datay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidX(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_vdata){
if(datatype==1){
cudaMemcpy(*data, _vdata, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdata, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetValidY(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_vdatay){
if(datatype==1){
cudaMemcpy(*data, _vdatay, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _vdatay, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::GetWeight(int datatype, size_t size, T**data) const {
CUDACHECK(cudaSetDevice(_wDev));
if(_weight){
if(datatype==1){
cudaMemcpy(*data, _weight, size* sizeof(T),cudaMemcpyDeviceToHost);
CUDA_CHECK_ERR();
}
else{
std::memcpy(*data, _weight, size * sizeof(T));
}
return(0);
}
else return(1);
}
template <typename T>
int MatrixDense<T>::Mul(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(cudaSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_m);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _data is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
template <typename T>
int MatrixDense<T>::Mulvalid(char trans, T alpha, const T *x, T beta, T *y) const {
DEBUG_EXPECT(this->_done_init);
if (!this->_done_init)
return 1;
CUDACHECK(cudaSetDevice(_wDev));
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
const cml::vector<T> x_vec = cml::vector_view_array<T>(x, this->_n);
cml::vector<T> y_vec = cml::vector_view_array<T>(y, this->_mvalid);
// Performs the matrix-vector operations y := alpha*A*x + beta*y or y := alpha*A'*x + beta*y where alpha and beta are scalars, x and y are vectors and A is an m by n matrix
// _vdata is A on GPU
//https://docs.oracle.com/cd/B19306_01/appdev.102/b14258/u_nla.htm#CIAFEAFG
if (_ord == ROW) {
cml::matrix<T, CblasRowMajor> A =
cml::matrix_view_array<T, CblasRowMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta,
&y_vec);
} else {
cml::matrix<T, CblasColMajor> A =
cml::matrix_view_array<T, CblasColMajor>(_vdata, this->_mvalid, this->_n);
cml::blas_gemv(hdl, OpToCublasOp(trans), alpha, &A, &x_vec, beta, &y_vec);
}
CUDA_CHECK_ERR();
return 0;
}
// col-major order (fortran) A, but still print as row major
template <typename T>
void printMatrix(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[row + col*lda];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
// row-major order (c) A printed as row major
template <typename T>
void printMatrix2(int m, int n, const T*A, int lda, const char* name)
{
printf("rows=%d cols=%d lda=%d\n",m,n,lda);
for(int row = 0 ; row < m ; row++){
for(int col = 0 ; col < n ; col++){
T Areg = A[col + row*n];
printf("%s(%d,%d) = %f\n", name, row+1, col+1, Areg);
}
}
}
/*
* How to compile (assume cuda is installed at /usr/local/cuda/)
* nvcc -c -I/usr/local/cuda/include svd_example.cpp
* g++ -fopenmp -o a.out svd_example.o -L/usr/local/cuda/lib64 -lcudart -lcublas -lcusolver
*
*/
inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, float *A, int lda, float *S, float *U, int ldu, float *VT, int ldvt, float *work, int lwork, float *rwork, int *devInfo){
return(cusolverDnSgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cusolverStatus_t cusolverDngesvd ( cusolverDnHandle_t handle, signed char jobu, signed char jobvt, int m, int n, double *A, int lda, double *S, double *U, int ldu, double *VT, int ldvt, double *work, int lwork, double *rwork, int *devInfo){
return(cusolverDnDgesvd(handle, jobu, jobvt, m, n, A, lda, S, U, ldu, VT, ldvt, work, lwork, rwork, devInfo));
}
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc){
return(cublasSgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline cublasStatus_t cublasgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, const double *alpha, const double *A, int lda, const double *B, int ldb, const double *beta, double *C, int ldc){
return(cublasDgemm_v2(handle, transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc));
}
inline cublasStatus_t cublasdgmm(cublasHandle_t handle,
cublasSideMode_t mode,
int m,
int n,
const float *A,
int lda,
const float *x,
int incx,
float *C,
int ldc){
return(cublasSdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline cublasStatus_t cublasdgmm(cublasHandle_t handle,
cublasSideMode_t mode,
int m,
int n,
const double *A,
int lda,
const double *x,
int incx,
double *C,
int ldc){
return(cublasDdgmm(handle,
mode,
m,
n,
A,
lda,
x,
incx,
C,
ldc));
}
inline cublasStatus_t cublasnrm2(cublasHandle_t handle,
int n,
const double *x,
int incx,
double *result){
return(cublasDnrm2_v2(handle,
n,
x,
incx,
result));
}
inline cublasStatus_t cublasnrm2(cublasHandle_t handle,
int n,
const float *x,
int incx,
float *result){
return(cublasSnrm2_v2(handle,
n,
x,
incx,
result));
}
// // Each block transposes/copies a tile of TILE_DIM x TILE_DIM elements
// // using TILE_DIM x BLOCK_ROWS threads, so that each thread transposes
// // TILE_DIM/BLOCK_ROWS elements. TILE_DIM must be an integral multiple of BLOCK_ROWS
#define TILE_DIM 16
#define BLOCK_ROWS 16
// __global__ void transposeNaive(float *odata, float* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeNaive(double *odata, double* idata,
// int width, int height)
// {
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + width * yIndex;
// int index_out = yIndex + height * xIndex;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i] = idata[index_in+i*width];
// }
// }
// __global__ void transposeCoalesced(float *odata,
// float *idata, int width, int height)
// {
// __shared__ float tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// __global__ void transposeCoalesced(double *odata,
// double *idata, int width, int height)
// {
// __shared__ double tile[TILE_DIM][TILE_DIM];
// int xIndex = blockIdx.x*TILE_DIM + threadIdx.x;
// int yIndex = blockIdx.y*TILE_DIM + threadIdx.y;
// int index_in = xIndex + (yIndex)*width;
// xIndex = blockIdx.y * TILE_DIM + threadIdx.x;
// yIndex = blockIdx.x * TILE_DIM + threadIdx.y;
// int index_out = xIndex + (yIndex)*height;
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// tile[threadIdx.y+i][threadIdx.x] =
// idata[index_in+i*width];
// }
// __syncthreads();
// for (int i=0; i<TILE_DIM; i+=BLOCK_ROWS) {
// odata[index_out+i*height] =
// tile[threadIdx.x][threadIdx.y+i];
// }
// }
// transpose a row-major device matrix A[m][n] into odata in column-major order (odata and idata are separate device buffers)
void cudaintranspose(float *odata, float *idata, int m, int n){
cudaError_t cudaStat1 = cudaSuccess;
cudaStat1 = cudaMemcpy(odata, idata, sizeof(float)*m*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
float const alpha(1.0);
float const beta(0.0);
cublasHandle_t handle;
cublasCreate(&handle);
cublasSgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
cublasDestroy(handle);
}
void cudaintranspose(double *odata, double *idata, int m, int n){
cudaError_t cudaStat1 = cudaSuccess;
cudaStat1 = cudaMemcpy(odata, idata, sizeof(double)*m*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
double const alpha(1.0);
double const beta(0.0);
cublasHandle_t handle;
cublasCreate(&handle);
cublasDgeam( handle, CUBLAS_OP_T, CUBLAS_OP_N, m, n, &alpha, idata, n, &beta, idata, m, odata, m );
cublasDestroy(handle);
}
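// Usage sketch (buffer names hypothetical): given a row-major m x n matrix d_src
// on the device, allocate a separate buffer d_dst of m*n elements and call
//   cudaintranspose(d_dst, d_src, m, n);
// d_dst then holds the same logical matrix in column-major order, which is the
// layout cuSOLVER's gesvd expects in svd1() below.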
#define MIN(a,b) ((a)<(b) ? (a) : (b))
template <typename T>
int MatrixDense<T>::svd1(void) {
fprintf(stderr,"begin svd inside0\n"); fflush(stderr); fflush(stdout);
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
Init();
fprintf(stderr,"begin svd inside\n"); fflush(stderr); fflush(stdout);
cusolverDnHandle_t cusolverH = NULL;
cublasHandle_t cublasH = NULL;
cublasStatus_t cublas_status = CUBLAS_STATUS_SUCCESS;
cusolverStatus_t cusolver_status = CUSOLVER_STATUS_SUCCESS;
cudaError_t cudaStat1 = cudaSuccess;
cudaError_t cudaStat2 = cudaSuccess;
cudaError_t cudaStat3 = cudaSuccess;
cudaError_t cudaStat4 = cudaSuccess;
cudaError_t cudaStat5 = cudaSuccess;
cudaError_t cudaStat6 = cudaSuccess;
int m = this->_m;
int n = this->_n;
// const int m = this->_m;
// const int n = this->_n;
int lda = m;
/* | 1 2 |
* A = | 4 5 |
* | 2 1 |
*/
  unsigned char ord='r'; // TODO: should be passed in rather than hard-coded
// original device vector
T *d_A0;
d_A0 = this->_data;
// device vectors
T *d_A = NULL;
T *d_S = NULL;
T *d_U = NULL;
T *d_VT = NULL;
int *devInfo = NULL;
T *d_work = NULL;
T *d_rwork = NULL;
T *d_W = NULL; // W = S*VT
int lwork = 0;
int info_gpu = 0;
const T h_one = 1;
const T h_minus_one = -1;
double t0 = timer<double>();
// step 1: create cusolverDn/cublas handle
cusolver_status = cusolverDnCreate(&cusolverH);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
cublas_status = cublasCreate(&cublasH);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
fprintf(stderr,"HERE1\n"); fflush(stderr); fflush(stdout);
// step 2: copy A to device
// cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(T)*lda*n);
// svd destroys d_A, so make copy for testing error // OPTMARK
cudaStat1 = cudaMalloc ((void**)&d_A , sizeof(T)*lda*n);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
int ldu=m; //lda;
int ldureal=n; // actual storage
int ldvt=n;
if(ord=='r'){
// transpose
// execution configuration parameters
//dim3 grid(n/TILE_DIM, lda/TILE_DIM), threads(TILE_DIM,BLOCK_ROWS);
// transposeCoalesced<<<grid, threads>>>(d_A, d_A0, n, lda);
// transposeNaive<<<grid, threads>>>(d_A, d_A0, n, lda);
cudaintranspose(d_A,d_A0,m,n); // OPTMARK
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
// below debug only for printMatrix2 to view, shouldn't actually swap for use.
if(0){
int temp=m;
m=n;
n=temp;
lda=m;
ldu=m; //lda;
ldureal=n; // actual storage
ldvt=n;
}
}
else{
d_A = d_A0;
}
fprintf(stderr,"HERE PRE\n"); fflush(stderr); fflush(stdout);
// old host side vectors
// T A[lda*n] = { 1.0, 4.0, 2.0, 2.0, 5.0, 1.0};
// GpuData<T> *info_A = reinterpret_cast<GpuData<T>*>(this->_info); // cast from void to GpuData
// T *A = const_cast<T*>(info_A->orig_data);
#if(0)
T A[lda*n]; // for debug
T U[ldureal*m]; // m-by-m unitary matrix
T VT[ldvt*n]; // n-by-n unitary matrix
T S[MIN(n,m)]; // singular value
#endif
// T S_exact[n] = {7.065283497082729, 1.040081297712078};
fprintf(stderr,"HERE POST\n"); fflush(stderr); fflush(stdout);
// now d_A has column-major order matrix
fprintf(stderr,"HERE2\n"); fflush(stderr); fflush(stdout);
#if(0) // debug
cudaStat1 = cudaMemcpy(A, d_A, sizeof(T)*lda*n, cudaMemcpyDeviceToHost);
assert(cudaSuccess == cudaStat1);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
printf("A = (matlab base-1)\n");
printMatrix(m, n, A, lda, "A");
printf("=====\n");
printf("A = (matlab base-1)\n");
printMatrix2(m, n, A, lda, "A");
printf("=====\n");
#endif
fprintf(stderr,"HERE3\n"); fflush(stderr); fflush(stdout);
cudaStat2 = cudaMalloc ((void**)&d_S , sizeof(T)*MIN(n,m));
cudaStat3 = cudaMalloc ((void**)&d_U , sizeof(T)*ldureal*m);
cudaStat4 = cudaMalloc ((void**)&d_VT , sizeof(T)*ldvt*n);
cudaStat5 = cudaMalloc ((void**)&devInfo, sizeof(int));
cudaStat6 = cudaMalloc ((void**)&d_W , sizeof(T)*lda*n);
// assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
assert(cudaSuccess == cudaStat5);
assert(cudaSuccess == cudaStat6);
// host->device
// cudaStat1 = cudaMemcpy(d_A, A, sizeof(T)*lda*n, cudaMemcpyHostToDevice);
// assert(cudaSuccess == cudaStat1);
// step 3: query working space of SVD
//The dense matrices are assumed to be stored in column-major order in memory.
cusolver_status = cusolverDnDgesvd_bufferSize(
cusolverH,
m,
n,
&lwork );
assert (cusolver_status == CUSOLVER_STATUS_SUCCESS);
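  // note: lwork comes from the double-precision bufferSize query regardless of T;
  // the workspace below is then allocated as lwork elements of T and handed to the
  // cusolverDngesvd wrapper defined above, which dispatches to Sgesvd or Dgesvd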
cudaStat1 = cudaMalloc((void**)&d_work , sizeof(T)*lwork);
assert(cudaSuccess == cudaStat1);
double t1 = timer<double>();
fprintf(stderr,"SVD init: %g\n",t1-t0); fflush(stderr); fflush(stdout);
// step 4: compute SVD
double t0c = timer<double>();
signed char jobu = 'A'; // all m columns of U
signed char jobvt = 'A'; // all n columns of VT
cusolver_status = cusolverDngesvd(
cusolverH,
jobu,
jobvt,
m,
n,
d_A,
lda,
d_S,
d_U,
ldu,
d_VT,
ldvt,
d_work,
lwork,
d_rwork,
devInfo);
cudaStat4 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost);
printf("after gesvd: info_gpu = %d\n", info_gpu); fflush(stdout);
assert(0 == info_gpu);
printf("=====\n"); fflush(stdout);
cudaStat1 = cudaDeviceSynchronize();
assert(cudaSuccess == cudaStat1);
fprintf(stderr,"BAD: %d\n",cusolver_status); fflush(stderr);
assert(CUSOLVER_STATUS_SUCCESS == cusolver_status);
double t1c = timer<double>();
fprintf(stderr,"SVD compute: %g\n",t1-t0); fflush(stderr); fflush(stdout);
#if(0)
/////////////////////////
// Copy solution device->host
double t0h = timer<double>();
cudaStat1 = cudaMemcpy(U , d_U , sizeof(T)*ldureal*m, cudaMemcpyDeviceToHost);
cudaStat2 = cudaMemcpy(VT, d_VT, sizeof(T)*ldvt*n, cudaMemcpyDeviceToHost);
cudaStat3 = cudaMemcpy(S , d_S , sizeof(T)*MIN(n,m), cudaMemcpyDeviceToHost);
assert(cudaSuccess == cudaStat1);
assert(cudaSuccess == cudaStat2);
assert(cudaSuccess == cudaStat3);
assert(cudaSuccess == cudaStat4);
if(0){ // debug
printf("S = (matlab base-1)\n");
printMatrix(n, 1, S, lda, "S");
printf("=====\n");
printf("U = (matlab base-1)\n");
printMatrix(m, m, U, ldureal, "U");
printf("=====\n");
printf("VT = (matlab base-1)\n");
printMatrix(n, n, VT, ldvt, "VT");
printf("=====\n");
/////////////////////////
// measure error of singular value
// T ds_sup = 0;
// for(int j = 0; j < n; j++){
// T err = fabs( S[j] - S_exact[j] );
// ds_sup = (ds_sup > err)? ds_sup : err;
// }
// printf("|S - S_exact| = %E \n", ds_sup);
}
double t1h = timer<double>();
fprintf(stderr,"SVD back to host: %g\n",t1h-t0h); fflush(stderr); fflush(stdout);
#endif
/////////////////////////
// now check
double t0c1 = timer<double>();
// step 5: |A - U*S*VT|
// W = S*VT
cublas_status = cublasdgmm(
cublasH,
CUBLAS_SIDE_LEFT,
n,
n,
d_VT,
ldvt,
d_S,
1,
d_W,
lda);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
double t1c1 = timer<double>();
fprintf(stderr,"SVD check1: %g\n",t1c1-t0c1); fflush(stderr); fflush(stdout);
// A := -U*W + A
double t0c2 = timer<double>();
cudaStat1 = cudaMemcpy(d_A, d_A0, sizeof(T)*lda*n, cudaMemcpyDeviceToDevice); // copy because original d_A was destroyed
assert(cudaSuccess == cudaStat1);
cublas_status = cublasgemm(
cublasH,
CUBLAS_OP_N, // U
CUBLAS_OP_N, // W
m, // number of rows of A
n, // number of columns of A
n, // number of columns of U
&h_minus_one, /* host pointer */
d_U, // U
ldu,
d_W, // W
lda,
&h_one, /* hostpointer */
d_A,
lda);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
double t1c2 = timer<double>();
fprintf(stderr,"SVD check2: %g\n",t1c2-t0c2); fflush(stderr); fflush(stdout);
double t0c3 = timer<double>();
T dR_fro = 0.0;
cublas_status = cublasnrm2(
cublasH, lda*n, d_A, 1, &dR_fro);
assert(CUBLAS_STATUS_SUCCESS == cublas_status);
printf("|A - U*S*VT| = %E \n", dR_fro); fflush(stdout);
double t1c3 = timer<double>();
fprintf(stderr,"SVD check3: %g\n",t1c3-t0c3); fflush(stderr); fflush(stdout);
// free resources
double t0f = timer<double>();
//if (d_A ) cudaFree(d_A);
if (d_S ) cudaFree(d_S);
if (d_U ) cudaFree(d_U);
if (d_VT ) cudaFree(d_VT);
if (devInfo) cudaFree(devInfo);
if (d_work ) cudaFree(d_work);
if (d_rwork) cudaFree(d_rwork);
if (d_W ) cudaFree(d_W);
if (cublasH ) cublasDestroy(cublasH);
if (cusolverH) cusolverDnDestroy(cusolverH);
// cudaDeviceReset();
double t1f = timer<double>();
fprintf(stderr,"SVD free: %g\n",t1f-t0f); fflush(stderr); fflush(stdout);
fprintf(stderr,"end svd inside\n"); fflush(stderr); fflush(stdout);
return 0;
}
// Equilibrate (precondition) the matrix using the Sinkhorn-Knopp method, wrapped to allow any norm
// See https://arxiv.org/pdf/1610.03871.pdf for more information
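// In outline (a sketch, not the exact SinkhornKnopp() implementation called below):
// working on f(A) with f = |.| or |.|^2 depending on the chosen norm, the method
// alternately rescales rows and columns until the scaled matrix is approximately
// doubly stochastic, roughly
//   d_i <- 1 / sum_j f(A_ij) e_j ,   e_j <- 1 / sum_i f(A_ij) d_i
// and the resulting d, e are the diagonal scalings applied later as A := D * A * E.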
template <typename T>
int MatrixDense<T>::Equil(bool equillocal) {
DEBUG_ASSERT(this->_done_init);
if (!this->_done_init)
return 1;
if (this->_done_equil) return 0;
else this->_done_equil=1;
CUDACHECK(cudaSetDevice(_wDev));
// Extract cublas handle from _info.
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
T *d = _de;
T *e = d + this->_m;
// Number of elements in matrix.
size_t num_el = this->_m * this->_n;
// Create bit-vector with signs of entries in A and then let A = f(A),
// where f = |A| or f = |A|.^2.
unsigned char *sign;
size_t num_sign_bytes = (num_el + 7) / 8;
cudaMalloc(&sign, num_sign_bytes);
CUDA_CHECK_ERR();
size_t num_chars = num_el / 8;
size_t grid_size = cml::calc_grid_dim(num_chars, cml::kBlockSize);
if(equillocal){
// Fill sign bits, assigning each thread a multiple of 8 elements.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SquareF<T>());
} else {
__SetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// If numel(A) is not a multiple of 8, then we need to set the last couple
// of sign bits too.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SquareF<T>());
} else {
__SetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, AbsF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Perform Sinkhorn-Knopp equilibration to obtain a doubly stochastic matrix.
SinkhornKnopp(this, d, e, equillocal);
wrapcudaDeviceSynchronize();
if(equillocal){
// Transform A = sign(A) .* sqrt(A) if 2-norm equilibration was performed,
// or A = sign(A) .* A if the 1-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
SqrtF<T>());
} else {
__UnSetSign<<<grid_size, cml::kBlockSize>>>(_data, sign, num_chars,
IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Deal with last few entries if num_el is not a multiple of 8.
if (num_el > num_chars * 8) {
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, SqrtF<T>());
} else {
__UnSetSignSingle<<<1, 1>>>(_data + num_chars * 8, sign + num_chars,
num_el - num_chars * 8, IdentityF<T>());
}
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
}
// Compute D := sqrt(D), E := sqrt(E), if 2-norm was equilibrated.
if (kNormEquilibrate == kNorm2 || kNormEquilibrate == kNormFro) {
thrust::transform(thrust::device_pointer_cast(d),
thrust::device_pointer_cast(d + this->_m),
thrust::device_pointer_cast(d), SqrtF<T>());
thrust::transform(thrust::device_pointer_cast(e),
thrust::device_pointer_cast(e + this->_n),
thrust::device_pointer_cast(e), SqrtF<T>());
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
}
// Compute A := D * A * E.
MultDiag(d, e, this->_m, this->_n, _ord, _data);
wrapcudaDeviceSynchronize();
CUDA_CHECK_ERR();
// Scale A to have norm of 1 (in the kNormNormalize norm).
T normA = NormEst(hdl, kNormNormalize, *this);
CUDA_CHECK_ERR();
wrapcudaDeviceSynchronize();
cml::vector<T> a_vec = cml::vector_view_array(_data, num_el);
cml::vector_scale(&a_vec, 1 / normA);
wrapcudaDeviceSynchronize();
// Scale d and e to account for normalization of A.
cml::vector<T> d_vec = cml::vector_view_array<T>(d, this->_m);
cml::vector<T> e_vec = cml::vector_view_array<T>(e, this->_n);
cml::vector_scale(&d_vec, 1 / sqrt(normA));
cml::vector_scale(&e_vec, 1 / sqrt(normA));
wrapcudaDeviceSynchronize();
DEBUG_PRINTF("norm A = %e, normd = %e, norme = %e\n", normA,
cml::blas_nrm2(hdl, &d_vec), cml::blas_nrm2(hdl, &e_vec));
cudaFree(sign);
CUDA_CHECK_ERR();
return 0;
}
// This example computes several statistical properties of a data
// series in a single reduction. The algorithm is described in detail here:
// http://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm
//
// Thanks to Joseph Rhoads for contributing this example
// structure used to accumulate the moments and other
// statistical properties encountered so far.
template <typename T>
struct summary_stats_data
{
T n;
T min;
T max;
T mean;
T M2;
T M3;
T M4;
// initialize to the identity element
void initialize()
{
n = mean = M2 = M3 = M4 = 0;
min = std::numeric_limits<T>::max();
max = std::numeric_limits<T>::min();
}
T variance() { return M2 / (n - 1); }
T variance_n() { return M2 / n; }
T skewness() { return std::sqrt(n) * M3 / std::pow(M2, (T) 1.5); }
T kurtosis() { return n * M4 / (M2 * M2); }
};
// summary_stats_unary_op is a functor that takes in a value x and
// returns a summary_stats_data whose mean value is initialized to x.
template <typename T>
struct summary_stats_unary_op
{
__host__ __device__
summary_stats_data<T> operator()(const T& x) const
{
summary_stats_data<T> result;
result.n = 1;
result.min = x;
result.max = x;
result.mean = x;
result.M2 = 0;
result.M3 = 0;
result.M4 = 0;
return result;
}
};
// summary_stats_binary_op is a functor that accepts two summary_stats_data
// structs and returns a new summary_stats_data which is an
// approximation to the summary_stats for
// all values that have been aggregated so far
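// For reference, with delta = y.mean - x.mean and n = x.n + y.n, the merge below uses
//   mean = x.mean + delta * y.n / n
//   M2   = x.M2 + y.M2 + delta^2 * x.n * y.n / n
// with analogous higher-order correction terms for M3 and M4 (see the link above).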
template <typename T>
struct summary_stats_binary_op
: public thrust::binary_function<const summary_stats_data<T>&,
const summary_stats_data<T>&,
summary_stats_data<T> >
{
__host__ __device__
summary_stats_data<T> operator()(const summary_stats_data<T>& x, const summary_stats_data <T>& y) const
{
summary_stats_data<T> result;
// precompute some common subexpressions
T n = x.n + y.n;
T n2 = n * n;
T n3 = n2 * n;
T delta = y.mean - x.mean;
T delta2 = delta * delta;
T delta3 = delta2 * delta;
T delta4 = delta3 * delta;
//Basic number of samples (n), min, and max
result.n = n;
result.min = thrust::min(x.min, y.min);
result.max = thrust::max(x.max, y.max);
result.mean = x.mean + delta * y.n / n;
result.M2 = x.M2 + y.M2;
result.M2 += delta2 * x.n * y.n / n;
result.M3 = x.M3 + y.M3;
result.M3 += delta3 * x.n * y.n * (x.n - y.n) / n2;
result.M3 += (T) 3.0 * delta * (x.n * y.M2 - y.n * x.M2) / n;
result.M4 = x.M4 + y.M4;
result.M4 += delta4 * x.n * y.n * (x.n * x.n - x.n * y.n + y.n * y.n) / n3;
result.M4 += (T) 6.0 * delta2 * (x.n * x.n * y.M2 + y.n * y.n * x.M2) / n2;
result.M4 += (T) 4.0 * delta * (x.n * y.M3 - y.n * x.M3) / n;
return result;
}
};
template <typename Iterator>
void print_range(const std::string& name, Iterator first, Iterator last)
{
typedef typename std::iterator_traits<Iterator>::value_type T;
std::cout << name << ": ";
thrust::copy(first, last, std::ostream_iterator<T>(std::cout, " "));
std::cout << "\n";
}
template<typename T>
struct absolute_value : public thrust::unary_function<T,T>
{
__host__ __device__ T operator()(const T &x) const
{
return x < T(0) ? -x : x;
}
};
// --- Operator for testing nan values
template<typename T>
struct isnan_test {
__host__ __device__ bool operator()(const T a) const {
return isnan(a) || isinf(a);
}
};
// check properties of input data: NaN/Inf checks, response summary statistics, and a lambda_max estimate
template <typename T>
int MatrixDense<T>::Stats(int intercept, T *min, T *max, T *mean, T *var, T *sd, T *skew, T *kurt, T &lambda_max0)
{
CUDACHECK(cudaSetDevice(_wDev));
if(_data!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_data);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_data+this->_m*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data matrix (trainX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_datay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_datay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_datay+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Data training predictions/labels (trainY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdata!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdata);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdata+this->_mvalid*this->_n);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data matrix (validX) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_vdatay!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_vdatay);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_vdatay+this->_mvalid);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Validation Data training predictions/labels (validY) has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
if(_weight!=NULL) {// check for nan or inf in data
thrust::device_ptr<T> begin = thrust::device_pointer_cast(_weight);
thrust::device_ptr<T> end = thrust::device_pointer_cast(_weight+this->_m);
bool h_result = thrust::transform_reduce(begin, end, isnan_test<T>(), 0, thrust::plus<bool>());
if(h_result==true){
fprintf(stderr,"Weight Training Data has nan/inf or missing was not encoded\n");
fflush(stderr);
exit(1);
}
}
// nothing else to do if _datay==NULL
if(_datay==NULL) return(0);
// setup arguments
summary_stats_unary_op<T> unary_op;
summary_stats_binary_op<T> binary_op;
summary_stats_data<T> init;
init.initialize();
int len=0;
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> dataybegin=thrust::device_pointer_cast(_datay);
len=this->_m;
thrust::device_ptr<T> datayend=thrust::device_pointer_cast(_datay+len);
// compute summary statistics
summary_stats_data<T> resulty = thrust::transform_reduce(dataybegin, datayend, unary_op, init, binary_op);
min[0]=resulty.min;
max[0]=resulty.max;
mean[0]=resulty.mean;
var[0]=resulty.variance();
sd[0]=std::sqrt(resulty.variance_n());
skew[0]=resulty.skewness();
kurt[0]=resulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Train*****"<<std::endl;
// print_range("The data", dataybegin, datayend);
std::cout <<"Count : "<< resulty.n << std::endl;
std::cout <<"Minimum : "<< min[0]<<std::endl;
std::cout <<"Maximum : "<< max[0]<<std::endl;
std::cout <<"Mean : "<< mean[0]<< std::endl;
std::cout <<"Variance : "<< var[0]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[0]<< std::endl;
std::cout <<"Skewness : "<< skew[0]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[0]<< std::endl;
#endif
// cast GPU pointer as thrust pointer
thrust::device_ptr<T> vdataybegin=thrust::device_pointer_cast(_vdatay);
len=this->_mvalid;
thrust::device_ptr<T> vdatayend=thrust::device_pointer_cast(_vdatay+len);
// compute summary statistics
summary_stats_data<T> vresulty = thrust::transform_reduce(vdataybegin, vdatayend, unary_op, init, binary_op);
min[1]=vresulty.min;
max[1]=vresulty.max;
mean[1]=vresulty.mean;
var[1]=vresulty.variance();
sd[1]=std::sqrt(vresulty.variance_n());
skew[1]=vresulty.skewness();
kurt[1]=vresulty.kurtosis();
#ifdef DEBUG
std::cout <<"******Summary Statistics of Response Valid*****"<<std::endl;
// print_range("The data", vdataybegin, vdatayend);
std::cout <<"Count : "<< vresulty.n << std::endl;
std::cout <<"Minimum : "<< min[1]<<std::endl;
std::cout <<"Maximum : "<< max[1]<<std::endl;
std::cout <<"Mean : "<< mean[1]<< std::endl;
std::cout <<"Variance : "<< var[1]<< std::endl;
std::cout <<"Standard Deviation : "<< sd[1]<< std::endl;
std::cout <<"Skewness : "<< skew[1]<< std::endl;
std::cout <<"Kurtosis : "<< kurt[1]<< std::endl;
#endif
if(1){ // normal usage
// Get Cublas handle
GpuData<T> *info = reinterpret_cast<GpuData<T>*>(this->_info);
cublasHandle_t hdl = info->handle;
// Set up views for raw vectors.
cml::vector<T> y_vec = cml::vector_view_array(_datay, this->_m); // b
cml::vector<T> weight_vec;
if(_weight) weight_vec = cml::vector_view_array(_weight, this->_m); // weight
else{
weight_vec = cml::vector_calloc<T>(this->_m); // weight make up
cml::vector_add_constant(&weight_vec, static_cast<T>(1.0)); // make unity weights
}
cml::vector<T> ytemp = cml::vector_calloc<T>(this->_m); // b
cml::vector<T> xtemp = cml::vector_calloc<T>(this->_n); // x
cml::vector_memcpy(&ytemp, &y_vec); // y_vec->ytemp
cml::vector_add_constant(&ytemp, -static_cast<T>(intercept)*mean[0]); // ytemp -> ytemp - intercept*mean[0]
cml::vector_mul(&ytemp,&weight_vec); // ytemp*weight -> ytemp
// Compute A^T . b
if (_ord == MatrixDense<T>::ROW) {
const cml::matrix<T, CblasRowMajor> A = cml::matrix_view_array<T, CblasRowMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
else{
const cml::matrix<T, CblasColMajor> A = cml::matrix_view_array<T, CblasColMajor>(_data, this->_m, this->_n); // just view
cml::blas_gemv(hdl, CUBLAS_OP_T, static_cast<T>(1.), &A, &ytemp, static_cast<T>(0.), &xtemp); // A.ytemp -> xtemp
}
thrust::device_ptr<T> dev_ptr = thrust::device_pointer_cast(&xtemp.data[0]);
lambda_max0 = thrust::transform_reduce(thrust::device,
dev_ptr, dev_ptr + this->_n-intercept,
absolute_value<T>(),
static_cast<T>(0.0),
thrust::maximum<T>());
}
else{
lambda_max0 = 7000; // test
}
CUDA_CHECK_ERR();
return 0;
}
////////////////////////////////////////////////////////////////////////////////
/////////////////////// Equilibration Helpers //////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
namespace {
// Estimates norm of A. norm_type should either be kNorm2 or kNormFro.
template <typename T>
T NormEst(cublasHandle_t hdl, NormTypes norm_type, const MatrixDense<T>& A) {
switch (norm_type) {
case kNorm2: {
return Norm2Est(hdl, &A);
}
case kNormFro: {
const cml::vector<T> a = cml::vector_view_array(A.Data(),
A.Rows() * A.Cols());
return cml::blas_nrm2(hdl, &a) / std::sqrt(std::min(A.Rows(), A.Cols()));
}
case kNorm1:
      // 1-norm normalization doesn't make sense since it treats rows and
      // columns differently.
default:
ASSERT(false);
return static_cast<T>(0.);
}
}
// Performs A := D * A * E for A in row major
template <typename T>
void __global__ __MultRow(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t / n] * e[t % n];
}
// Performs A := D * A * E for A in col major
template <typename T>
void __global__ __MultCol(size_t m, size_t n, const T *d, const T *e, T *data) {
size_t tid = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t t = tid; t < m * n; t += gridDim.x * blockDim.x)
data[t] *= d[t % m] * e[t / m];
}
template <typename T>
void MultDiag(const T *d, const T *e, size_t m, size_t n,
typename MatrixDense<T>::Ord ord, T *data) {
if (ord == MatrixDense<T>::ROW) {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultRow<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
} else {
size_t grid_dim_row = cml::calc_grid_dim(m * n, cml::kBlockSize);
__MultCol<<<grid_dim_row, cml::kBlockSize>>>(m, n, d, e, data);
}
}
} // namespace
// Explicit template instantiation.
#if !defined(H2O4GPU_DOUBLE) || H2O4GPU_DOUBLE==1
template class MatrixDense<double>;
#endif
#if !defined(H2O4GPU_SINGLE) || H2O4GPU_SINGLE==1
template class MatrixDense<float>;
#endif
// upload data function. Uploads to a single GPU.
// mimics otherwise similar MatrixDense constructor, but has no destruction of uploaded data pointers
template <typename T>
int makePtr_dense(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord, const T *data, const T *datay, const T *vdata, const T *vdatay, const T *weight, T **_data, T **_datay, T **_vdata, T **_vdatay, T **_weight){
checkwDev(wDev);
CUDACHECK(cudaSetDevice(wDev));
DEBUG_FPRINTF(stderr,"makePtr_dense: %d\n",0);
#ifdef DEBUG
// CUDACHECK(cudaSetDeviceFlags(cudaDeviceMapHost)); // TODO: MapHostMemory
cudaDeviceProp props;
CUDACHECK(cudaGetDeviceProperties(&props, wDev));
DEBUG_FPRINTF(stderr,"Using: Compute %d.%d CUDA device: [%s] with id=%2d\n", props.major, props.minor, props.name,wDev);
#endif
// Copy Matrix to GPU (unlike CPU case, cannot copy just pointer because always assume input is CPU and output is GPU)
double t0 = timer<double>();
PUSH_RANGE("MDsendsource",MDsendsource,1);
if(data){
CUDACHECK(cudaMalloc(_data, m * n * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_data, data, m * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_data: %p\n",(void*)*_data); fflush(stderr);
}
else *_data=NULL;
if(datay){
CUDACHECK(cudaMalloc(_datay, m * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_datay, datay, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_datay: %p\n",(void*)*_datay); fflush(stderr);
}
else *_datay=NULL;
if(vdata){
CUDACHECK(cudaMalloc(_vdata, mValid * n * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_vdata, vdata, mValid * n * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdata: %p\n",(void*)*_vdata); fflush(stderr);
}
else *_vdata=NULL;
if(vdatay){
CUDACHECK(cudaMalloc(_vdatay, mValid * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_vdatay, vdatay, mValid * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
// fprintf(stderr,"_vdatay: %p\n",(void*)*_vdatay); fflush(stderr);
}
else *_vdatay=NULL;
// fprintf(stderr,"weight=%p\n",weight); fflush(stderr);
if(weight){
CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU
CUDACHECK(cudaMemcpy(*_weight, weight, m * sizeof(T),cudaMemcpyHostToDevice)); // copy from orig CPU data to GPU
}
else{
DEBUG_FPRINTF(stderr,"making up unity weights: %d\n",m);
CUDACHECK(cudaMalloc(_weight, m * sizeof(T))); // allocate on GPU
thrust::device_ptr<T> dev_ptr=thrust::device_pointer_cast(static_cast<T*>(*_weight));
T fill_value=1.0;
thrust::fill(dev_ptr, dev_ptr + m, fill_value);
// fprintf(stderr,"_weight: %p\n",(void*)*_weight); fflush(stderr);
}
POP_RANGE("MDsendsource",MDsendsource,1);
double t2 = timer<double>();
DEBUG_FPRINTF(stdout,"Time to allocate and copy the data matrix on the GPU: %f\n", t2-t0);
cudaDeviceSynchronize();
DEBUG_FPRINTF(stderr,"pointer data %p\n",(void*)*_data);
DEBUG_FPRINTF(stderr,"pointer datay %p\n",(void*)*_datay);
DEBUG_FPRINTF(stderr,"pointer vdata %p\n",(void*)*_vdata);
DEBUG_FPRINTF(stderr,"pointer vdaty %p\n",(void*)*_vdatay);
DEBUG_FPRINTF(stderr,"pointer weight %p\n",(void*)*_weight);
return(0);
}
template int makePtr_dense<double>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const double *data, const double *datay, const double *vdata, const double *vdatay, const double *weight,
double **_data, double **_datay, double **_vdata, double **_vdatay, double **_weight);
template int makePtr_dense<float>(int sharedA, int me, int wDev, size_t m, size_t n, size_t mValid, const char ord,
const float *data, const float *datay, const float *vdata, const float *vdatay, const float *weight,
float **_data, float **_datay, float **_vdata, float **_vdatay, float **_weight);
template <typename T>
int modelFree1(T *aptr){
if(aptr!=NULL){
// for now, freed during ~
//cudaFree(aptr);
//CUDA_CHECK_ERR();
}
return(0);
}
template int modelFree1<float>(float *aptr);
template int modelFree1<double>(double *aptr);
} // namespace h2o4gpu
int modelfree1_double(double *aptr){
return h2o4gpu::modelFree1<double>(aptr);
}
int modelfree1_float(float *aptr){
return h2o4gpu::modelFree1<float>(aptr);
}
int make_ptr_double(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const double* trainX, const double* trainY, const double* validX, const double* validY, const double *weight,
double**a, double**b, double**c, double**d, double **e) {
return h2o4gpu::makePtr_dense<double>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
int make_ptr_float(int sharedA, int sourceme, int sourceDev, size_t mTrain, size_t n, size_t mValid, const char ord,
const float* trainX, const float* trainY, const float* validX, const float* validY, const float *weight,
float**a, float**b, float**c, float**d, float **e) {
return h2o4gpu::makePtr_dense<float>(sharedA, sourceme, sourceDev, mTrain, n, mValid, ord, trainX, trainY, validX, validY, weight, a, b, c, d, e);
}
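// Usage sketch (CPU array names hypothetical): upload training/validation arrays
// to the GPU and get device pointers back; passing NULL for the weights makes
// unity weights on the device, e.g.
//   float *dX, *dY, *dVX, *dVY, *dW;
//   make_ptr_float(0 /*sharedA*/, 0 /*sourceme*/, 0 /*sourceDev*/, mTrain, n, mValid, 'r',
//                  trainX, trainY, validX, validY, NULL, &dX, &dY, &dVX, &dVY, &dW);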
|
d18589d8379248aa7a7b51f18f0143b2c8c4bc1b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdio>
using namespace std;
__device__ double xmax = 3.14;
double xmax_;
__global__
void fun(int N)
{
double *x = new double[N];
double *y = new double[N];
if (x) {
x[N-1] = 3.1415926;
y[N-1] = 2.7182818;
//printf("x[N-1] = %f\n", x[N-1]);
}
else
printf("heap overflow\n");
}
int main()
{
int N = 1024;
size_t heapsize;
hipDeviceSetLimit(hipLimitMallocHeapSize, 41943040 + 96*1024*1024);
hipDeviceGetLimit(&heapsize, hipLimitMallocHeapSize);
cout << "heapsize = " << heapsize << endl;
hipLaunchKernelGGL(( fun), dim3(1024*1024/256),dim3(256), 0, 0, N);
hipDeviceSynchronize();
xmax_ = 5.;
}
| d18589d8379248aa7a7b51f18f0143b2c8c4bc1b.cu | #include <iostream>
#include <cstdio>
using namespace std;
__device__ double xmax = 3.14;
double xmax_;
__global__
void fun(int N)
{
double *x = new double[N];
double *y = new double[N];
if (x) {
x[N-1] = 3.1415926;
y[N-1] = 2.7182818;
//printf("x[N-1] = %f\n", x[N-1]);
}
else
printf("heap overflow\n");
}
int main()
{
int N = 1024;
size_t heapsize;
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 41943040 + 96*1024*1024);
cudaDeviceGetLimit(&heapsize, cudaLimitMallocHeapSize);
cout << "heapsize = " << heapsize << endl;
fun<<<1024*1024/256,256>>>(N);
cudaDeviceSynchronize();
xmax_ = 5.;
}
|
e9c211446c4cfde7204f16f3ad68c9fcac326d62.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
namespace phi {
namespace sr {
template <typename T>
__global__ void UpdateBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
return;
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
paddle::operators::math::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
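  // Two paths from here: if the beta-pow accumulators live on the host, their
  // scalar values are dereferenced directly, the REG kernel is launched with
  // them by value, and the updated pows are written back on the CPU; otherwise
  // a SparseAdamFunctor reads them on the device and the small UpdateBetaPow
  // kernel advances them there.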
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
hipLaunchKernelGGL(( SparseAdamCUDAKernelREG<T, MPDType>)
, dim3(blocks), dim3(threads), 0, dev_ctx.stream(),
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
hipLaunchKernelGGL(( UpdateBetaPow<MPDType>), dim3(1), dim3(32), 0, dev_ctx.stream(),
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
}
| e9c211446c4cfde7204f16f3ad68c9fcac326d62.cu | // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/selected_rows_functor.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/common/amp_type_traits.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/core/kernel_registry.h"
#include "paddle/phi/kernels/copy_kernel.h"
#include "paddle/phi/kernels/funcs/adam_functors.h"
#include "paddle/phi/kernels/funcs/for_range.h"
#include "paddle/phi/kernels/selected_rows/adam_kernel.h"
namespace phi {
namespace sr {
template <typename T>
__global__ void UpdateBetaPow(T beta1,
T beta2,
const T* beta1_pow_,
const T* beta2_pow_,
T* beta1_pow_out,
T* beta2_pow_out) {
*beta1_pow_out = beta1 * beta1_pow_[0];
*beta2_pow_out = beta2 * beta2_pow_[0];
}
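// Element-wise Adam step for a dense parameter with a sparse (SelectedRows)
// gradient, with the beta-pow scalars passed by value from the host:
//   m1 <- beta1 * m1 + (1 - beta1) * g
//   m2 <- beta2 * m2 + (1 - beta2) * g * g
//   p  <- p - (lr / (1 - beta1_pow)) * m1 / (sqrt(m2) / sqrt(1 - beta2_pow) + epsilon)
// Elements whose row is absent from the sparse gradient use g = 0, or are
// skipped entirely when lazy_mode is set.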
template <typename T, typename MT>
__global__ void SparseAdamCUDAKernelREG(MT beta1,
MT beta2,
MT epsilon,
const MT beta1_pow,
const MT beta2_pow,
const MT* mom1_,
MT* mom1_out_,
const MT* mom2_,
MT* mom2_out_,
const MT* lr_,
const T* grad_,
const T* param_,
T* param_out_,
const MT* master_param,
MT* master_param_out,
const int64_t* rows_,
int64_t row_numel,
int64_t row_count,
bool lazy_mode,
int ndim) {
int id = blockIdx.x * blockDim.x + threadIdx.x;
MT lr = *lr_;
for (; id < ndim; id += blockDim.x * gridDim.x) {
auto row_idx =
phi::funcs::BinarySearch<int64_t>(rows_, row_count, id / row_numel);
if (lazy_mode && row_idx < 0) {
return;
} else {
MT mom1 = mom1_[id];
MT mom2 = mom2_[id];
MT p = master_param ? master_param[id] : static_cast<MT>(param_[id]);
MT g = row_idx >= 0
? static_cast<MT>(grad_[row_idx * row_numel + id % row_numel])
: static_cast<MT>(0);
mom1 = beta1 * mom1 + (static_cast<MT>(1.0) - beta1) * g;
mom2 = beta2 * mom2 + (static_cast<MT>(1.0) - beta2) * g * g;
MT denom =
(sqrt(mom2) / sqrt(static_cast<MT>(1.0) - beta2_pow)) + epsilon;
p += (mom1 / denom) * (-(lr / (static_cast<MT>(1.0) - beta1_pow)));
// Write back to global memory
mom1_out_[id] = mom1;
mom2_out_[id] = mom2;
param_out_[id] = static_cast<T>(p);
if (master_param_out) {
master_param_out[id] = p;
}
}
}
}
template <typename T, typename Context>
void AdamDenseParamSparseGradKernel(
const Context& dev_ctx,
const DenseTensor& param,
const SelectedRows& grad,
const DenseTensor& learning_rate,
const DenseTensor& moment1,
const DenseTensor& moment2,
const DenseTensor& beta1_pow,
const DenseTensor& beta2_pow,
const paddle::optional<DenseTensor>& master_param,
const paddle::optional<DenseTensor>& skip_update,
const Scalar& beta1,
const Scalar& beta2,
const Scalar& epsilon,
bool lazy_mode,
int64_t min_row_size_to_use_multithread,
bool multi_precision,
bool use_global_beta_pow,
DenseTensor* param_out,
DenseTensor* moment1_out,
DenseTensor* moment2_out,
DenseTensor* beta1_pow_out,
DenseTensor* beta2_pow_out,
DenseTensor* master_param_outs) {
using MPDType = typename phi::dtype::MPTypeTrait<T>::Type;
VLOG(4) << "use_global_beta_pow:" << use_global_beta_pow;
bool skip_update_ = false;
if (skip_update.is_initialized()) {
PADDLE_ENFORCE_EQ(
skip_update->numel(),
1,
errors::InvalidArgument("Input(SkipUpdate) size must be 1, but get %d",
skip_update->numel()));
std::vector<bool> skip_update_vec;
paddle::framework::TensorToVector(*skip_update, dev_ctx, &skip_update_vec);
skip_update_ = skip_update_vec[0];
}
// skip_update=true, just copy input to output, and TensorCopy will call
// mutable_data
if (skip_update_) {
VLOG(4) << "Adam skip update";
phi::Copy(dev_ctx, param, dev_ctx.GetPlace(), false, param_out);
phi::Copy(dev_ctx, moment1, dev_ctx.GetPlace(), false, moment1_out);
phi::Copy(dev_ctx, moment2, dev_ctx.GetPlace(), false, moment2_out);
phi::Copy(dev_ctx, beta1_pow, beta1_pow.place(), false, beta1_pow_out);
phi::Copy(dev_ctx, beta2_pow, beta2_pow.place(), false, beta2_pow_out);
return;
}
MPDType beta1_ = beta1.to<MPDType>();
MPDType beta2_ = beta2.to<MPDType>();
MPDType epsilon_ = epsilon.to<MPDType>();
VLOG(3) << "beta1_pow.numel() : " << beta1_pow.numel()
<< "beta2_pow.numel() : " << beta2_pow.numel();
VLOG(3) << "param.numel(): " << param.numel();
PADDLE_ENFORCE_EQ(
beta1_pow_out->numel(),
1,
errors::InvalidArgument("beta1 pow output size should be 1, but received "
"value is:%d.",
beta1_pow_out->numel()));
PADDLE_ENFORCE_EQ(
beta2_pow_out->numel(),
1,
errors::InvalidArgument("beta2 pow output size should be 1, but received "
"value is:%d.",
beta2_pow_out->numel()));
const MPDType* master_in_data =
multi_precision ? master_param->data<MPDType>() : nullptr;
MPDType* master_out_data =
multi_precision ? dev_ctx.template Alloc<MPDType>(master_param_outs)
: nullptr;
if (grad.rows().size() == 0) {
VLOG(3) << "grad row size is 0!!";
return;
}
std::vector<int64_t> cpu_rows(grad.rows().begin(), grad.rows().end());
bool is_strict_sorted = true;
for (size_t i = 1; i < cpu_rows.size(); ++i) {
if (cpu_rows[i - 1] >= cpu_rows[i]) {
is_strict_sorted = false;
break;
}
}
phi::SelectedRows tmp_grad_merge;
const phi::SelectedRows* grad_merge_ptr;
if (is_strict_sorted) {
grad_merge_ptr = &grad;
} else {
// merge duplicated rows if any.
// The rows of grad_merge have been sorted inside MergeAdd functor
paddle::operators::math::scatter::MergeAdd<Context, T> merge_func;
merge_func(dev_ctx, grad, &tmp_grad_merge, true);
grad_merge_ptr = &tmp_grad_merge;
}
auto& grad_merge = *grad_merge_ptr;
auto& grad_tensor = grad_merge.value();
const T* grad_data = grad_tensor.template data<T>();
auto* grad_merge_rows = &grad_merge.rows();
paddle::framework::MixVector<int64_t> mixv_grad_merge_rows(grad_merge_rows);
const int64_t* rows = mixv_grad_merge_rows.Data(dev_ctx.GetPlace());
auto row_numel = grad_tensor.numel() / grad_merge.rows().size();
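  // Two paths from here: if the beta-pow accumulators live on the host, their
  // scalar values are dereferenced directly, the REG kernel is launched with
  // them by value, and the updated pows are written back on the CPU; otherwise
  // a SparseAdamFunctor reads them on the device and the small UpdateBetaPow
  // kernel advances them there.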
if (beta1_pow.place() == CPUPlace() && beta2_pow.place() == CPUPlace()) {
int threads = 512;
int ndim = param.numel();
int blocks = (ndim + threads - 1) / threads;
SparseAdamCUDAKernelREG<T, MPDType>
<<<blocks, threads, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
epsilon_,
*beta1_pow.data<MPDType>(),
*beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode,
ndim);
if (!use_global_beta_pow) {
// Update with cpu
dev_ctx.template HostAlloc<MPDType>(beta1_pow_out)[0] =
beta1_ * beta1_pow.data<MPDType>()[0];
dev_ctx.template HostAlloc<MPDType>(beta2_pow_out)[0] =
beta2_ * beta2_pow.data<MPDType>()[0];
}
} else {
funcs::SparseAdamFunctor<T, funcs::GPUAdam, MPDType> functor(
beta1_,
beta2_,
epsilon_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
moment1.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment1_out),
moment2.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(moment2_out),
learning_rate.data<MPDType>(),
grad_data,
param.data<T>(),
dev_ctx.template Alloc<T>(param_out),
master_in_data,
master_out_data,
rows,
row_numel,
grad_merge.rows().size(),
lazy_mode);
// FIXME(minqiyang): remove BinarySearch in GPU later
funcs::ForRange<Context> for_range(dev_ctx, param.numel());
for_range(functor);
if (!use_global_beta_pow) {
// update beta1 and beta2
UpdateBetaPow<MPDType><<<1, 32, 0, dev_ctx.stream()>>>(
beta1_,
beta2_,
beta1_pow.data<MPDType>(),
beta2_pow.data<MPDType>(),
dev_ctx.template Alloc<MPDType>(beta1_pow_out),
dev_ctx.template Alloc<MPDType>(beta2_pow_out));
}
}
}
} // namespace sr
} // namespace phi
PD_REGISTER_KERNEL(adam_dense_param_sparse_grad,
GPU,
ALL_LAYOUT,
phi::sr::AdamDenseParamSparseGradKernel,
float,
double,
phi::dtype::float16) {
// Skip beta1_pow, beta2_pow, skip_update data transform
kernel->InputAt(5).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(6).SetBackend(phi::Backend::ALL_BACKEND);
kernel->InputAt(8).SetBackend(phi::Backend::ALL_BACKEND);
}
|
65cb03530d508a3f20d0aeec0db7695b7801165e.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_temp_pos;
glm::vec3 *dev_temp_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to hipFree in Boids::endSimulation.
hipMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel1 failed!");
hipMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
hipLaunchKernelGGL(( kernGenerateRandomPosArray), dim3(fullBlocksPerGrid), dim3(blockSize), 0, 0, 1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * ::max(::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
//2.1
hipMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleArrayIndices failed!");
hipMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_particleGridIndices failed!");
hipMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellStartIndices failed!");
hipMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
//2.3
hipMalloc((void**)&dev_temp_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_temp_pos failed!");
hipMalloc((void**)&dev_temp_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("hipMalloc dev_temp_vel failed!");
hipDeviceSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
hipDeviceSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ float magnitude(glm::vec3 a) {
return sqrt(a.x*a.x+a.y*a.y+a.z*a.z);
}
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 thisPos = pos[iSelf];
glm::vec3 thisVel = vel[iSelf];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
for (int i = 0; i < N; ++i)
{
if (i == iSelf) continue;
float distance = magnitude(thisPos - pos[i]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[i];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[i] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel[i];
++neighborCount3;
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
	if (neighborCount3 != 0) // rule 3 has its own neighbor count
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
return thisVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) return;
glm::vec3 thisVel2 = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = magnitude(thisVel2);
if (speed > maxSpeed)
thisVel2 = (thisVel2 / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
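  // Answer: every thread in this launch still reads its neighbors' velocities
  // from vel1, so vel1 must stay untouched until the launch finishes; the host
  // swaps the two buffers afterwards.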
vel2[index] = thisVel2;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 gridxyz = (pos[index] - gridMin) * inverseCellWidth;
gridxyz = glm::vec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
gridIndices[index] = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
	if (index == 0)
		gridCellStartIndices[particleGridIndices[0]] = 0;
	else if (particleGridIndices[index - 1] != particleGridIndices[index])
	{
		gridCellStartIndices[particleGridIndices[index]] = index;
		gridCellEndIndices[particleGridIndices[index - 1]] = index - 1;
	}
	// The last boid closes its cell even when it also opened it above.
	if (index == N - 1)
		gridCellEndIndices[particleGridIndices[N - 1]] = N - 1;
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 thisPos = pos[particleArrayIndices[index]];
glm::vec3 thisVel = vel1[particleArrayIndices[index]];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
// - Identify the grid cell that this particle is in
glm::vec3 gridxyz = (thisPos - gridMin) * inverseCellWidth;
glm::ivec3 gridxyz_int = glm::ivec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
///int cellIndex = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
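  // Since cellWidth is twice the largest rule distance, the search radius can
  // only spill into the neighboring cell on whichever side of its own cell the
  // boid sits, i.e. at most a 2x2x2 block of cells per boid.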
glm::vec3 disxyz = thisPos - gridMin - glm::vec3(gridxyz_int) * cellWidth;
glm::ivec3 RStart(gridxyz_int);
glm::ivec3 REnd(gridxyz_int);
for (int i = 0; i < 3; ++i)
{
if (disxyz[i] < cellWidth / 2)
RStart[i] = (gridxyz[i] - 1 >= 0) ? gridxyz[i] - 1 : gridxyz[i];
else
REnd[i] = (gridxyz[i] + 1 <= gridResolution - 1) ? gridxyz[i] + 1 : gridxyz[i];
}
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for(int i = RStart.x; i <= REnd.x; ++i)
for(int j = RStart.y; j <= REnd.y; ++j)
for (int k = RStart.z; k <= REnd.z; ++k)
{
				int curIndex = gridIndex3Dto1D(i, j, k, gridResolution);
				if (gridCellStartIndices[curIndex] < 0) continue; // empty cell: start/end still -1
				for (int l = gridCellStartIndices[curIndex]; l <= gridCellEndIndices[curIndex]; ++l)
				{
					int now = particleArrayIndices[l];
					if (l == index) continue; // skip this boid's own entry, not the cell index
float distance = magnitude(thisPos - pos[now]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[now];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[now] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel1[now];
++neighborCount3;
}
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
	if (neighborCount3 != 0) // rule 3 has its own neighbor count
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
float speed = magnitude(thisVel);
if (speed > maxSpeed)
thisVel = (thisVel / speed) * maxSpeed;
vel2[particleArrayIndices[index]] = thisVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 thisPos = pos[index];
glm::vec3 thisVel = vel1[index];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
// - Identify the grid cell that this particle is in
glm::vec3 gridxyz = (thisPos - gridMin) * inverseCellWidth;
glm::ivec3 gridxyz_int= glm::ivec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
///int cellIndex = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
glm::vec3 disxyz = thisPos - gridMin - glm::vec3(gridxyz_int) * cellWidth;
glm::ivec3 RStart(gridxyz_int);
glm::ivec3 REnd(gridxyz_int);
for (int i = 0; i < 3; ++i)
{
if (disxyz[i] < cellWidth / 2)
RStart[i] = (gridxyz[i] - 1 >= 0) ? gridxyz[i] - 1 : gridxyz[i];
else
REnd[i] = (gridxyz[i] + 1 <= gridResolution - 1) ? gridxyz[i] + 1 : gridxyz[i];
}
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int i = RStart.x; i <= REnd.x; ++i)
for (int j = RStart.y; j <= REnd.y; ++j)
for (int k = RStart.z; k <= REnd.z; ++k)
{
				int curIndex = gridIndex3Dto1D(i, j, k, gridResolution);
				if (gridCellStartIndices[curIndex] < 0) continue; // empty cell: start/end still -1
				for (int l = gridCellStartIndices[curIndex]; l <= gridCellEndIndices[curIndex]; ++l)
				{
					if (l == index) continue; // data is cell-coherent, so l indexes the boid directly
float distance = magnitude(thisPos - pos[l]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[l];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[l] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel1[l];
++neighborCount3;
}
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
	if (neighborCount3 != 0) // rule 3 has its own neighbor count
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
float speed = magnitude(thisVel);
if (speed > maxSpeed)
thisVel = (thisVel / speed) * maxSpeed;
vel2[index] = thisVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
//Use vel1 and pos to compute new vel, and assign it to vel2
hipLaunchKernelGGL(( kernUpdateVelocityBruteForce) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dev_pos, dev_vel1, dev_vel2);
//Use vel2 and pos to compute new pos
hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers
///Here should swap but not simply dev_vel1 = dev_vel2
///this will result in dev1 and dev2 point to the same space
///which is not what we want
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
hipMemset(dev_gridCellStartIndices, -1, sizeof(int) * gridCellCount);
hipMemset(dev_gridCellEndIndices, -1, sizeof(int) * gridCellCount);
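  // Cells that receive no boids keep start = end = -1; the neighbor-search
  // kernel treats a negative start index as an empty cell and skips it.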
kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
hipLaunchKernelGGL(( kernUpdatePos) , dim3(fullBlocksPerGrid), dim3(blockSize) , 0, 0, numObjects, dt, dev_pos, dev_vel2);
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
}
__global__ void kernTempPosVel(int N, int *Indices,
glm::vec3 *pos, glm::vec3 *vel1,
glm::vec3 *tempPos, glm::vec3 *tempVel) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
tempPos[index] = pos[Indices[index]];
tempVel[index] = vel1[Indices[index]];
/*
__syncthreads();
pos[index] = tempPos[index];
vel1[index] = tempVel[index];
*/
}
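// Copying pos/vel1 into cell-sorted order means boids that are close in space
// are also adjacent in memory, which is what lets the coherent kernel drop the
// particleArrayIndices indirection and read neighbor data contiguously.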
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
//Compute Indices
kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//Sort Grid Indices by Array Indices
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
//Initialize Start Indices and End Indices
hipMemset(dev_gridCellStartIndices, -1, sizeof(int) * gridCellCount);
hipMemset(dev_gridCellEndIndices, -1, sizeof(int) * gridCellCount);
//Identify Cell Start and End
kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
//Rearrange pos and vel1
kernTempPosVel << < fullBlocksPerGrid, blockSize >> >(numObjects,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_temp_pos, dev_temp_vel);
//Update Vel Neighbor Search Coherent
kernUpdateVelNeighborSearchCoherent << < fullBlocksPerGrid, blockSize >> >(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_temp_pos, dev_temp_vel, dev_vel2);
//Update Pos
kernUpdatePos << < fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_temp_pos, dev_vel2);
//Ping-pong
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
//2.3
glm::vec3 *b = dev_temp_pos;
dev_temp_pos = dev_pos;
dev_pos = b;
}
void Boids::endSimulation() {
hipFree(dev_vel1);
hipFree(dev_vel2);
hipFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
//2.1
hipFree(dev_particleArrayIndices);
hipFree(dev_particleGridIndices);
hipFree(dev_gridCellStartIndices);
hipFree(dev_gridCellEndIndices);
//2.3
hipFree(dev_temp_pos);
hipFree(dev_temp_vel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
hipMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intKeys failed!");
hipMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
hipMemcpy(dev_intKeys, intKeys, sizeof(int) * N, hipMemcpyHostToDevice);
hipMemcpy(dev_intValues, intValues, sizeof(int) * N, hipMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
hipMemcpy(intKeys, dev_intKeys, sizeof(int) * N, hipMemcpyDeviceToHost);
hipMemcpy(intValues, dev_intValues, sizeof(int) * N, hipMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
hipFree(dev_intKeys);
hipFree(dev_intValues);
checkCUDAErrorWithLine("hipFree failed!");
return;
}
| 65cb03530d508a3f20d0aeec0db7695b7801165e.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <glm/glm.hpp>
#include "utilityCore.hpp"
#include "kernel.h"
// LOOK-2.1 potentially useful for doing grid-based neighbor search
#ifndef imax
#define imax( a, b ) ( ((a) > (b)) ? (a) : (b) )
#endif
#ifndef imin
#define imin( a, b ) ( ((a) < (b)) ? (a) : (b) )
#endif
#define checkCUDAErrorWithLine(msg) checkCUDAError(msg, __LINE__)
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
/*****************
* Configuration *
*****************/
/*! Block size used for CUDA kernel launch. */
#define blockSize 128
// LOOK-1.2 Parameters for the boids algorithm.
// These worked well in our reference implementation.
#define rule1Distance 5.0f
#define rule2Distance 3.0f
#define rule3Distance 5.0f
#define rule1Scale 0.01f
#define rule2Scale 0.1f
#define rule3Scale 0.1f
#define maxSpeed 1.0f
/*! Size of the starting area in simulation space. */
#define scene_scale 100.0f
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
int numObjects;
dim3 threadsPerBlock(blockSize);
// LOOK-1.2 - These buffers are here to hold all your boid information.
// These get allocated for you in Boids::initSimulation.
// Consider why you would need two velocity buffers in a simulation where each
// boid cares about its neighbors' velocities.
// These are called ping-pong buffers.
glm::vec3 *dev_pos;
glm::vec3 *dev_vel1;
glm::vec3 *dev_vel2;
// LOOK-2.1 - these are NOT allocated for you. You'll have to set up the thrust
// pointers on your own too.
// For efficient sorting and the uniform grid. These should always be parallel.
int *dev_particleArrayIndices; // What index in dev_pos and dev_velX represents this particle?
int *dev_particleGridIndices; // What grid cell is this particle in?
// needed for use with thrust
thrust::device_ptr<int> dev_thrust_particleArrayIndices;
thrust::device_ptr<int> dev_thrust_particleGridIndices;
int *dev_gridCellStartIndices; // What part of dev_particleArrayIndices belongs
int *dev_gridCellEndIndices; // to this cell?
// TODO-2.3 - consider what additional buffers you might need to reshuffle
// the position and velocity data to be coherent within cells.
glm::vec3 *dev_temp_pos;
glm::vec3 *dev_temp_vel;
// LOOK-2.1 - Grid parameters based on simulation parameters.
// These are automatically computed for you in Boids::initSimulation
int gridCellCount;
int gridSideCount;
float gridCellWidth;
float gridInverseCellWidth;
glm::vec3 gridMinimum;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
/**
* LOOK-1.2 - this is a typical helper function for a CUDA kernel.
* Function for generating a random vec3.
*/
__host__ __device__ glm::vec3 generateRandomVec3(float time, int index) {
thrust::default_random_engine rng(hash((int)(index * time)));
thrust::uniform_real_distribution<float> unitDistrib(-1, 1);
return glm::vec3((float)unitDistrib(rng), (float)unitDistrib(rng), (float)unitDistrib(rng));
}
/**
* LOOK-1.2 - This is a basic CUDA kernel.
* CUDA kernel for generating boids with a specified mass randomly around the star.
*/
__global__ void kernGenerateRandomPosArray(int time, int N, glm::vec3 * arr, float scale) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
glm::vec3 rand = generateRandomVec3(time, index);
arr[index].x = scale * rand.x;
arr[index].y = scale * rand.y;
arr[index].z = scale * rand.z;
}
}
/**
* Initialize memory, update some globals
*/
void Boids::initSimulation(int N) {
numObjects = N;
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
// LOOK-1.2 - This is basic CUDA memory management and error checking.
// Don't forget to cudaFree in Boids::endSimulation.
cudaMalloc((void**)&dev_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_vel1, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel1 failed!");
cudaMalloc((void**)&dev_vel2, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_vel2 failed!");
// LOOK-1.2 - This is a typical CUDA kernel invocation.
kernGenerateRandomPosArray<<<fullBlocksPerGrid, blockSize>>>(1, numObjects,
dev_pos, scene_scale);
checkCUDAErrorWithLine("kernGenerateRandomPosArray failed!");
// LOOK-2.1 computing grid params
gridCellWidth = 2.0f * std::max(std::max(rule1Distance, rule2Distance), rule3Distance);
int halfSideCount = (int)(scene_scale / gridCellWidth) + 1;
gridSideCount = 2 * halfSideCount;
gridCellCount = gridSideCount * gridSideCount * gridSideCount;
gridInverseCellWidth = 1.0f / gridCellWidth;
float halfGridWidth = gridCellWidth * halfSideCount;
gridMinimum.x -= halfGridWidth;
gridMinimum.y -= halfGridWidth;
gridMinimum.z -= halfGridWidth;
// TODO-2.1 TODO-2.3 - Allocate additional buffers here.
//2.1
cudaMalloc((void**)&dev_particleArrayIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleArrayIndices failed!");
cudaMalloc((void**)&dev_particleGridIndices, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_particleGridIndices failed!");
cudaMalloc((void**)&dev_gridCellStartIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellStartIndices failed!");
cudaMalloc((void**)&dev_gridCellEndIndices, gridCellCount * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_gridCellEndIndices failed!");
dev_thrust_particleArrayIndices = thrust::device_ptr<int>(dev_particleArrayIndices);
dev_thrust_particleGridIndices = thrust::device_ptr<int>(dev_particleGridIndices);
//2.3
cudaMalloc((void**)&dev_temp_pos, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_temp_pos failed!");
cudaMalloc((void**)&dev_temp_vel, N * sizeof(glm::vec3));
checkCUDAErrorWithLine("cudaMalloc dev_temp_vel failed!");
cudaThreadSynchronize();
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec3 *pos, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
float c_scale = -1.0f / s_scale;
if (index < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyVelocitiesToVBO(int N, glm::vec3 *vel, float *vbo, float s_scale) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index < N) {
vbo[4 * index + 0] = vel[index].x + 0.3f;
vbo[4 * index + 1] = vel[index].y + 0.3f;
vbo[4 * index + 2] = vel[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void Boids::copyBoidsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_pos, vbodptr_positions, scene_scale);
kernCopyVelocitiesToVBO << <fullBlocksPerGrid, blockSize >> >(numObjects, dev_vel1, vbodptr_velocities, scene_scale);
checkCUDAErrorWithLine("copyBoidsToVBO failed!");
cudaThreadSynchronize();
}
/******************
* stepSimulation *
******************/
/**
* LOOK-1.2 You can use this as a helper for kernUpdateVelocityBruteForce.
* __device__ code can be called from a __global__ context
* Compute the new velocity on the body with index `iSelf` due to the `N` boids
* in the `pos` and `vel` arrays.
*/
__device__ float magnitude(glm::vec3 a) {
return sqrt(a.x*a.x+a.y*a.y+a.z*a.z);
}
__device__ glm::vec3 computeVelocityChange(int N, int iSelf, const glm::vec3 *pos, const glm::vec3 *vel) {
glm::vec3 thisPos = pos[iSelf];
glm::vec3 thisVel = vel[iSelf];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
for (int i = 0; i < N; ++i)
{
if (i == iSelf) continue;
float distance = magnitude(thisPos - pos[i]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[i];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[i] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel[i];
++neighborCount3;
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
	if (neighborCount3 != 0) // rule 3 has its own neighbor count
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
return thisVel;
}
/**
* TODO-1.2 implement basic flocking
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdateVelocityBruteForce(int N, glm::vec3 *pos,
glm::vec3 *vel1, glm::vec3 *vel2) {
// Compute a new velocity based on pos and vel1
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) return;
glm::vec3 thisVel2 = computeVelocityChange(N, index, pos, vel1);
// Clamp the speed
float speed = magnitude(thisVel2);
if (speed > maxSpeed)
thisVel2 = (thisVel2 / speed) * maxSpeed;
// Record the new velocity into vel2. Question: why NOT vel1?
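  // Answer: every thread in this launch still reads its neighbors' velocities
  // from vel1, so vel1 must stay untouched until the launch finishes; the host
  // swaps the two buffers afterwards.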
vel2[index] = thisVel2;
}
/**
* LOOK-1.2 Since this is pretty trivial, we implemented it for you.
* For each of the `N` bodies, update its position based on its current velocity.
*/
__global__ void kernUpdatePos(int N, float dt, glm::vec3 *pos, glm::vec3 *vel) {
// Update position by velocity
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
glm::vec3 thisPos = pos[index];
thisPos += vel[index] * dt;
// Wrap the boids around so we don't lose them
thisPos.x = thisPos.x < -scene_scale ? scene_scale : thisPos.x;
thisPos.y = thisPos.y < -scene_scale ? scene_scale : thisPos.y;
thisPos.z = thisPos.z < -scene_scale ? scene_scale : thisPos.z;
thisPos.x = thisPos.x > scene_scale ? -scene_scale : thisPos.x;
thisPos.y = thisPos.y > scene_scale ? -scene_scale : thisPos.y;
thisPos.z = thisPos.z > scene_scale ? -scene_scale : thisPos.z;
pos[index] = thisPos;
}
// LOOK-2.1 Consider this method of computing a 1D index from a 3D grid index.
// LOOK-2.3 Looking at this method, what would be the most memory efficient
// order for iterating over neighboring grid cells?
// for(x)
// for(y)
// for(z)? Or some other order?
__device__ int gridIndex3Dto1D(int x, int y, int z, int gridResolution) {
return x + y * gridResolution + z * gridResolution * gridResolution;
}
__global__ void kernComputeIndices(int N, int gridResolution,
glm::vec3 gridMin, float inverseCellWidth,
glm::vec3 *pos, int *indices, int *gridIndices) {
// TODO-2.1
// - Label each boid with the index of its grid cell.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 gridxyz = (pos[index] - gridMin) * inverseCellWidth;
gridxyz = glm::vec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
gridIndices[index] = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Set up a parallel array of integer indices as pointers to the actual
// boid data in pos and vel1/vel2
indices[index] = index;
}
// LOOK-2.1 Consider how this could be useful for indicating that a cell
// does not enclose any boids
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernIdentifyCellStartEnd(int N, int *particleGridIndices,
int *gridCellStartIndices, int *gridCellEndIndices) {
// TODO-2.1
// Identify the start point of each cell in the gridIndices array.
// This is basically a parallel unrolling of a loop that goes
// "this index doesn't match the one before it, must be a new cell!"
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
	if (index == 0)
		gridCellStartIndices[particleGridIndices[0]] = 0;
	else if (particleGridIndices[index - 1] != particleGridIndices[index])
	{
		gridCellStartIndices[particleGridIndices[index]] = index;
		gridCellEndIndices[particleGridIndices[index - 1]] = index - 1;
	}
	// The last boid closes its cell even when it also opened it above.
	if (index == N - 1)
		gridCellEndIndices[particleGridIndices[N - 1]] = N - 1;
}
__global__ void kernUpdateVelNeighborSearchScattered(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
int *particleArrayIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.1 - Update a boid's velocity using the uniform grid to reduce
// the number of boids that need to be checked.
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 thisPos = pos[particleArrayIndices[index]];
glm::vec3 thisVel = vel1[particleArrayIndices[index]];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
// - Identify the grid cell that this particle is in
glm::vec3 gridxyz = (thisPos - gridMin) * inverseCellWidth;
glm::ivec3 gridxyz_int = glm::ivec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
///int cellIndex = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
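  // Since cellWidth is twice the largest rule distance, the search radius can
  // only spill into the neighboring cell on whichever side of its own cell the
  // boid sits, i.e. at most a 2x2x2 block of cells per boid.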
glm::vec3 disxyz = thisPos - gridMin - glm::vec3(gridxyz_int) * cellWidth;
glm::ivec3 RStart(gridxyz_int);
glm::ivec3 REnd(gridxyz_int);
for (int i = 0; i < 3; ++i)
{
if (disxyz[i] < cellWidth / 2)
RStart[i] = (gridxyz[i] - 1 >= 0) ? gridxyz[i] - 1 : gridxyz[i];
else
REnd[i] = (gridxyz[i] + 1 <= gridResolution - 1) ? gridxyz[i] + 1 : gridxyz[i];
}
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for(int i = RStart.x; i <= REnd.x; ++i)
for(int j = RStart.y; j <= REnd.y; ++j)
for (int k = RStart.z; k <= REnd.z; ++k)
{
				int curIndex = gridIndex3Dto1D(i, j, k, gridResolution);
				if (gridCellStartIndices[curIndex] < 0) continue; // empty cell: start/end still -1
				for (int l = gridCellStartIndices[curIndex]; l <= gridCellEndIndices[curIndex]; ++l)
				{
					int now = particleArrayIndices[l];
					if (l == index) continue; // skip this boid's own entry, not the cell index
float distance = magnitude(thisPos - pos[now]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[now];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[now] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel1[now];
++neighborCount3;
}
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
if (neighborCount3 != 0)
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
float speed = magnitude(thisVel);
if (speed > maxSpeed)
thisVel = (thisVel / speed) * maxSpeed;
vel2[particleArrayIndices[index]] = thisVel;
}
__global__ void kernUpdateVelNeighborSearchCoherent(
int N, int gridResolution, glm::vec3 gridMin,
float inverseCellWidth, float cellWidth,
int *gridCellStartIndices, int *gridCellEndIndices,
glm::vec3 *pos, glm::vec3 *vel1, glm::vec3 *vel2) {
// TODO-2.3 - This should be very similar to kernUpdateVelNeighborSearchScattered,
// except with one less level of indirection.
// This should expect gridCellStartIndices and gridCellEndIndices to refer
// directly to pos and vel1.
// - Identify the grid cell that this particle is in
// - Identify which cells may contain neighbors. This isn't always 8.
// - For each cell, read the start/end indices in the boid pointer array.
// DIFFERENCE: For best results, consider what order the cells should be
// checked in to maximize the memory benefits of reordering the boids data.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
// - Clamp the speed change before putting the new speed in vel2
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
glm::vec3 thisPos = pos[index];
glm::vec3 thisVel = vel1[index];
glm::vec3 center_pos(0.0, 0.0, 0.0);
glm::vec3 separate_dis(0.0, 0.0, 0.0);
glm::vec3 cohesion_vel(0.0, 0.0, 0.0);
int neighborCount1 = 0, neighborCount3 = 0;
// - Identify the grid cell that this particle is in
glm::vec3 gridxyz = (thisPos - gridMin) * inverseCellWidth;
glm::ivec3 gridxyz_int= glm::ivec3(int(gridxyz.x), int(gridxyz.y), int(gridxyz.z));
///int cellIndex = gridIndex3Dto1D(gridxyz.x, gridxyz.y, gridxyz.z, gridResolution);
// - Identify which cells may contain neighbors. This isn't always 8.
glm::vec3 disxyz = thisPos - gridMin - glm::vec3(gridxyz_int) * cellWidth;
glm::ivec3 RStart(gridxyz_int);
glm::ivec3 REnd(gridxyz_int);
for (int i = 0; i < 3; ++i)
{
if (disxyz[i] < cellWidth / 2)
RStart[i] = (gridxyz[i] - 1 >= 0) ? gridxyz[i] - 1 : gridxyz[i];
else
REnd[i] = (gridxyz[i] + 1 <= gridResolution - 1) ? gridxyz[i] + 1 : gridxyz[i];
}
// - For each cell, read the start/end indices in the boid pointer array.
// - Access each boid in the cell and compute velocity change from
// the boids rules, if this boid is within the neighborhood distance.
for (int i = RStart.x; i <= REnd.x; ++i)
for (int j = RStart.y; j <= REnd.y; ++j)
for (int k = RStart.z; k <= REnd.z; ++k)
{
int curIndex = gridIndex3Dto1D(i, j, k, gridResolution);
if (gridCellStartIndices[curIndex] < 0) continue; // empty cell: still -1 from the reset
for (int l = gridCellStartIndices[curIndex]; l <= gridCellEndIndices[curIndex]; ++l)
{
if (l == index) continue; // skip this boid itself (data is already cell-coherent)
float distance = magnitude(thisPos - pos[l]);
// Rule 1: boids fly towards their local perceived center of mass, which excludes themselves
if (distance < rule1Distance)
{
center_pos += pos[l];
++neighborCount1;
}
// Rule 2: boids try to stay a distance d away from each other
if (distance < rule2Distance)
separate_dis -= pos[l] - thisPos;
// Rule 3: boids try to match the speed of surrounding boids
if (distance < rule3Distance)
{
cohesion_vel += vel1[l];
++neighborCount3;
}
}
}
if (neighborCount1 != 0)
{
center_pos /= neighborCount1;
thisVel += (center_pos - thisPos) * rule1Scale;
}
thisVel += separate_dis * rule2Scale;
if (neighborCount3 != 0)
{
cohesion_vel /= neighborCount3;
thisVel += cohesion_vel * rule3Scale;
}
// - Clamp the speed change before putting the new speed in vel2
float speed = magnitude(thisVel);
if (speed > maxSpeed)
thisVel = (thisVel / speed) * maxSpeed;
vel2[index] = thisVel;
}
/**
* Step the entire N-body simulation by `dt` seconds.
*/
void Boids::stepSimulationNaive(float dt) {
// TODO-1.2 - use the kernels you wrote to step the simulation forward in time.
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
//Use vel1 and pos to compute new vel, and assign it to vel2
kernUpdateVelocityBruteForce <<< fullBlocksPerGrid, blockSize >>>(numObjects, dev_pos, dev_vel1, dev_vel2);
//Use vel2 and pos to compute new pos
kernUpdatePos <<< fullBlocksPerGrid, blockSize >>>(numObjects, dt, dev_pos, dev_vel2);
// TODO-1.2 ping-pong the velocity buffers
///Here we should swap the pointers rather than simply assigning dev_vel1 = dev_vel2:
///that would leave dev_vel1 and dev_vel2 pointing to the same buffer,
///which is not what we want.
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
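// (Equivalently, std::swap(dev_vel1, dev_vel2) would swap the device pointers
// on the host side; the swap is written out explicitly here.)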
}
void Boids::stepSimulationScatteredGrid(float dt) {
// TODO-2.1
// Uniform Grid Neighbor search using Thrust sort.
// In Parallel:
// - label each particle with its array index as well as its grid index.
// Use 2x width grids.
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
cudaMemset(dev_gridCellStartIndices, -1, sizeof(int) * gridCellCount);
cudaMemset(dev_gridCellEndIndices, -1, sizeof(int) * gridCellCount);
kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects, dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
kernUpdateVelNeighborSearchScattered << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_particleArrayIndices, dev_pos, dev_vel1, dev_vel2);
kernUpdatePos <<< fullBlocksPerGrid, blockSize >>>(numObjects, dt, dev_pos, dev_vel2);
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
}
__global__ void kernTempPosVel(int N, int *Indices,
glm::vec3 *pos, glm::vec3 *vel1,
glm::vec3 *tempPos, glm::vec3 *tempVel) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index >= N) return;
tempPos[index] = pos[Indices[index]];
tempVel[index] = vel1[Indices[index]];
/*
__syncthreads();
pos[index] = tempPos[index];
vel1[index] = tempVel[index];
*/
}
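// Illustrative example (not part of the original source): if the sorted
// dev_particleArrayIndices = [3, 0, 2, 1], then tempPos = {pos[3], pos[0],
// pos[2], pos[1]}, i.e. boid data is gathered into cell-coherent order so the
// coherent kernel can index pos/vel directly without particleArrayIndices.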
void Boids::stepSimulationCoherentGrid(float dt) {
// TODO-2.3 - start by copying Boids::stepSimulationNaiveGrid
// Uniform Grid Neighbor search using Thrust sort on cell-coherent data.
// In Parallel:
// - Label each particle with its array index as well as its grid index.
// Use 2x width grids
// - Unstable key sort using Thrust. A stable sort isn't necessary, but you
// are welcome to do a performance comparison.
// - Naively unroll the loop for finding the start and end indices of each
// cell's data pointers in the array of boid indices
// - BIG DIFFERENCE: use the rearranged array index buffer to reshuffle all
// the particle data in the simulation array.
// CONSIDER WHAT ADDITIONAL BUFFERS YOU NEED
// - Perform velocity updates using neighbor search
// - Update positions
// - Ping-pong buffers as needed. THIS MAY BE DIFFERENT FROM BEFORE.
dim3 threadsPerBlock(blockSize);
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
//Compute Indices
kernComputeIndices << < fullBlocksPerGrid, blockSize >> >(numObjects, gridSideCount, gridMinimum,
gridInverseCellWidth, dev_pos, dev_particleArrayIndices, dev_particleGridIndices);
//Sort boid array indices by grid cell index (key = grid index, value = array index)
thrust::sort_by_key(dev_thrust_particleGridIndices, dev_thrust_particleGridIndices + numObjects, dev_thrust_particleArrayIndices);
//Initialize Start Indices and End Indices
cudaMemset(dev_gridCellStartIndices, -1, sizeof(int) * gridCellCount);
cudaMemset(dev_gridCellEndIndices, -1, sizeof(int) * gridCellCount);
//Identify Cell Start and End
kernIdentifyCellStartEnd << < fullBlocksPerGrid, blockSize >> >(numObjects,
dev_particleGridIndices, dev_gridCellStartIndices, dev_gridCellEndIndices);
//Rearrange pos and vel1
kernTempPosVel << < fullBlocksPerGrid, blockSize >> >(numObjects,
dev_particleArrayIndices, dev_pos, dev_vel1, dev_temp_pos, dev_temp_vel);
//Update Vel Neighbor Search Coherent
kernUpdateVelNeighborSearchCoherent << < fullBlocksPerGrid, blockSize >> >(numObjects,
gridSideCount, gridMinimum, gridInverseCellWidth, gridCellWidth, dev_gridCellStartIndices, dev_gridCellEndIndices, dev_temp_pos, dev_temp_vel, dev_vel2);
//Update Pos
kernUpdatePos << < fullBlocksPerGrid, blockSize >> >(numObjects, dt, dev_temp_pos, dev_vel2);
//Ping-pong
glm::vec3 *a = dev_vel1;
dev_vel1 = dev_vel2;
dev_vel2 = a;
//2.3
glm::vec3 *b = dev_temp_pos;
dev_temp_pos = dev_pos;
dev_pos = b;
}
void Boids::endSimulation() {
cudaFree(dev_vel1);
cudaFree(dev_vel2);
cudaFree(dev_pos);
// TODO-2.1 TODO-2.3 - Free any additional buffers here.
//2.1
cudaFree(dev_particleArrayIndices);
cudaFree(dev_particleGridIndices);
cudaFree(dev_gridCellStartIndices);
cudaFree(dev_gridCellEndIndices);
//2.3
cudaFree(dev_temp_pos);
cudaFree(dev_temp_vel);
}
void Boids::unitTest() {
// LOOK-1.2 Feel free to write additional tests here.
// test unstable sort
int *dev_intKeys;
int *dev_intValues;
int N = 10;
int *intKeys = new int[N];
int *intValues = new int[N];
intKeys[0] = 0; intValues[0] = 0;
intKeys[1] = 1; intValues[1] = 1;
intKeys[2] = 0; intValues[2] = 2;
intKeys[3] = 3; intValues[3] = 3;
intKeys[4] = 0; intValues[4] = 4;
intKeys[5] = 2; intValues[5] = 5;
intKeys[6] = 2; intValues[6] = 6;
intKeys[7] = 0; intValues[7] = 7;
intKeys[8] = 5; intValues[8] = 8;
intKeys[9] = 6; intValues[9] = 9;
cudaMalloc((void**)&dev_intKeys, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intKeys failed!");
cudaMalloc((void**)&dev_intValues, N * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_intValues failed!");
dim3 fullBlocksPerGrid((N + blockSize - 1) / blockSize);
std::cout << "before unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// How to copy data to the GPU
cudaMemcpy(dev_intKeys, intKeys, sizeof(int) * N, cudaMemcpyHostToDevice);
cudaMemcpy(dev_intValues, intValues, sizeof(int) * N, cudaMemcpyHostToDevice);
// Wrap device vectors in thrust iterators for use with thrust.
thrust::device_ptr<int> dev_thrust_keys(dev_intKeys);
thrust::device_ptr<int> dev_thrust_values(dev_intValues);
// LOOK-2.1 Example for using thrust::sort_by_key
thrust::sort_by_key(dev_thrust_keys, dev_thrust_keys + N, dev_thrust_values);
// How to copy data back to the CPU side from the GPU
cudaMemcpy(intKeys, dev_intKeys, sizeof(int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy(intValues, dev_intValues, sizeof(int) * N, cudaMemcpyDeviceToHost);
checkCUDAErrorWithLine("memcpy back failed!");
std::cout << "after unstable sort: " << std::endl;
for (int i = 0; i < N; i++) {
std::cout << " key: " << intKeys[i];
std::cout << " value: " << intValues[i] << std::endl;
}
// cleanup
delete[] intKeys;
delete[] intValues;
cudaFree(dev_intKeys);
cudaFree(dev_intValues);
checkCUDAErrorWithLine("cudaFree failed!");
return;
}
|
f955fa99bf9496be283ab0eb4d07882f4172852e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void square(float* d_out, float* d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
} | f955fa99bf9496be283ab0eb4d07882f4172852e.cu | #include "includes.h"
__global__ void square(float* d_out, float* d_in){
int idx = threadIdx.x;
float f = d_in[idx];
d_out[idx] = f*f;
} |
17bf03bcbdeaacb499b11806ff3c2b76ccd641f2.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static constexpr size_t kConvCUDNNWorkspaceLimitBytes = 1024 * 1024 * 1024;
template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
const Tensor* input, Tensor* output,
const std::vector<int>& axis, int flag = 0) {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
math::Transpose<platform::CUDADeviceContext, T, D> transpose;
auto in_dims = input->dims();
std::vector<int64_t> input_transpose_vec;
for (size_t i = 0; i < axis.size(); ++i) {
if (flag == 0)
input_transpose_vec.push_back(in_dims[axis[i]]);
else
input_transpose_vec.push_back(in_dims[i]);
}
framework::DDim input_transpose_dims(
framework::make_ddim(input_transpose_vec));
output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
transpose(dev_ctx, *input, output, axis);
}
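// Illustrative note (not part of the original source): with the default
// flag == 0 and axis = {0, 3, 1, 2}, an NHWC input with dims (N, H, W, C) is
// rearranged into an NCHW output with dims (N, C, H, W); the reverse
// permutation {0, 2, 3, 1} is used below to restore channel-last outputs.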
template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const T* filter_data = filter->data<T>();
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec = framework::vectorize<int>(output->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
}
} else {
input_transpose = *input;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_transpose.dims()[0];
new_input_shape_vec[1] = input_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
input_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
default:
PADDLE_ENFORCE_EQ(
rank == 4 || rank == 5, true,
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
}
} else {
transformed_input = input_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + output_vec[i + 2];
axes[i] = i + 2;
}
const T* input_data = transformed_input.data<T>();
input_vec = framework::vectorize<int>(transformed_input.dims());
std::vector<int> transformed_output_vec = output_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_output_vec[i + 2] =
output_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
}
Tensor transformed_output;
if (!is_sys_pad) {
DDim transformed_output_shape(
framework::make_ddim(transformed_output_vec));
transformed_output.mutable_data<T>(transformed_output_shape,
ctx.GetPlace());
} else {
output->mutable_data<T>(ctx.GetPlace());
transformed_output.ShareDataWith(*output);
transformed_output.Resize(framework::make_ddim(transformed_output_vec));
}
T* transformed_output_data = transformed_output.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedConvolutionDescriptor conv_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
// (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc =
input_desc.descriptor<T>(layout, input_vec, groups);
// (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc =
output_desc.descriptor<T>(layout, transformed_output_vec, groups);
// (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()), groups);
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes;
if (user_workspace_size > 0) {
workspace_size_limit = user_workspace_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t algo;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Get the algorithm
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm(
handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc,
// dxDesc: Handle to the previously initialized output tensor
// descriptor.
cudnn_output_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &algo));
if (algo == 0 && FLAGS_cudnn_deterministic) {
algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(1);
}
// get workspace size able to allocate
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize(
handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
// ------------------- cudnn conv transpose forward ---------------------
int input_offset =
transformed_input.numel() / transformed_input.dims()[0] / groups;
int output_offset =
transformed_output.numel() / transformed_output.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, cudnn_filter_desc,
filter_data + filter_offset * g, cudnn_input_desc,
input_data + input_offset * g, cudnn_conv_desc, algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_output_desc,
transformed_output_data + output_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
if (!is_sys_pad && strides.size() == 2U) {
Slice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_output, output, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
Slice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_output, output, starts, ends, axes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor output_transpose;
Tensor output_nchw;
output_nchw.ShareDataWith(*output);
output_nchw.Resize(framework::make_ddim(output_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
}
}
}
};
template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
Tensor output_grad_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec =
framework::vectorize<int>(output_grad->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
}
} else {
input_transpose = *input;
output_grad_transpose = *output_grad;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_output_grad;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_output_grad_shape_vec[i + 2] =
output_grad_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_output_grad_shape(
framework::make_ddim(new_output_grad_shape_vec));
transformed_output_grad.Resize(new_output_grad_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_output_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_output_grad_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
default:
PADDLE_ENFORCE_EQ(
rank == 4 || rank == 5, true,
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
}
} else {
transformed_output_grad = output_grad_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = input_transpose.data<T>();
const T* output_grad_data = transformed_output_grad.data<T>();
output_vec = framework::vectorize<int>(transformed_output_grad.dims());
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedConvolutionDescriptor conv_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
// Input: (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc =
input_desc.descriptor<T>(layout, input_vec, groups);
// Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc =
output_desc.descriptor<T>(layout, output_vec, groups);
// Filter (M, C, K_h, K_w) or (M, C, K_d K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()), groups);
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionFwdAlgo_t data_algo;
cudnnConvolutionBwdFilterAlgo_t filter_algo;
size_t bwd_filter_ws_size, fwd_ws_size;
size_t workspace_size_in_bytes = 0;
size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes;
if (user_workspace_size > 0) {
workspace_size_limit = user_workspace_size * 1024 * 1024;
}
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
if (input_grad) {
// choose backward algorithm for data
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_input_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &data_algo));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_input_desc, data_algo, &fwd_ws_size));
workspace_size_in_bytes = std::max(workspace_size_in_bytes, fwd_ws_size);
}
if (filter_grad) {
// choose backward algorithm for filter
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm(
handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_filter_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &filter_algo));
// get workspace for backwards filter algorithm
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize(
handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_filter_desc, filter_algo, &bwd_filter_ws_size));
workspace_size_in_bytes =
std::max(workspace_size_in_bytes, bwd_filter_ws_size);
}
// ------------------- cudnn conv backward data ---------------------
// FIXME(typhoonzero): template type T may not be the same as cudnn call.
int input_offset = input->numel() / input->dims()[0] / groups;
int output_grad_offset = transformed_output_grad.numel() /
transformed_output_grad.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
if (input_grad) {
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset input_grad.
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, cudnn_output_desc,
output_grad_data + output_grad_offset * g, cudnn_filter_desc,
filter_data + filter_offset * g, cudnn_conv_desc, data_algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_input_desc, input_grad_data + input_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor input_grad_transpose;
Tensor input_grad_nchw;
input_grad_nchw.ShareDataWith(*input_grad);
input_grad_nchw.Resize(framework::make_ddim(input_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset filter_grad.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, cudnn_output_desc,
output_grad_data + output_grad_offset * g, cudnn_input_desc,
input_data + input_offset * g, cudnn_conv_desc, filter_algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_filter_desc, filter_grad_data + filter_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
| 17bf03bcbdeaacb499b11806ff3c2b76ccd641f2.cu | /* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include "paddle/fluid/framework/eigen.h"
#include "paddle/fluid/framework/op_registry.h"
#include "paddle/fluid/memory/memory.h"
#include "paddle/fluid/operators/conv_transpose_op.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/operators/math/padding.h"
#include "paddle/fluid/platform/cudnn_helper.h"
namespace paddle {
namespace operators {
using Tensor = framework::Tensor;
using ScopedTensorDescriptor = platform::ScopedTensorDescriptor;
using ScopedFilterDescriptor = platform::ScopedFilterDescriptor;
using ScopedConvolutionDescriptor = platform::ScopedConvolutionDescriptor;
using DataLayout = platform::DataLayout;
static constexpr size_t kConvCUDNNWorkspaceLimitBytes = 1024 * 1024 * 1024;
template <typename T, int D>
static void DataTranspose(const framework::ExecutionContext& ctx,
const Tensor* input, Tensor* output,
const std::vector<int>& axis, int flag = 0) {
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
math::Transpose<platform::CUDADeviceContext, T, D> transpose;
auto in_dims = input->dims();
std::vector<int64_t> input_transpose_vec;
for (size_t i = 0; i < axis.size(); ++i) {
if (flag == 0)
input_transpose_vec.push_back(in_dims[axis[i]]);
else
input_transpose_vec.push_back(in_dims[i]);
}
framework::DDim input_transpose_dims(
framework::make_ddim(input_transpose_vec));
output->mutable_data<T>(input_transpose_dims, ctx.GetPlace());
transpose(dev_ctx, *input, output, axis);
}
template <typename T>
class CUDNNConvTransposeOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true,
"It must use CUDAPlace.");
auto* input = ctx.Input<Tensor>("Input");
auto* filter = ctx.Input<Tensor>("Filter");
auto* output = ctx.Output<Tensor>("Output");
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const T* filter_data = filter->data<T>();
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec = framework::vectorize<int>(output->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
}
} else {
input_transpose = *input;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_input;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = input_transpose.dims()[0];
new_input_shape_vec[1] = input_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
input_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_input_shape(
framework::make_ddim(new_input_shape_vec));
transformed_input.Resize(new_input_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_input =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_input_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, input_transpose, pad_value, &transformed_input);
} break;
default:
PADDLE_ENFORCE_EQ(
rank == 4 || rank == 5, true,
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
}
} else {
transformed_input = input_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
std::vector<int64_t> starts(data_dim, 0);
std::vector<int64_t> ends(data_dim, 0);
std::vector<int64_t> axes(data_dim, 0);
for (size_t i = 0; i < data_dim; ++i) {
starts[i] = input_pad[2 * i + 4] * (strides[i] + 1);
ends[i] = starts[i] + output_vec[i + 2];
axes[i] = i + 2;
}
const T* input_data = transformed_input.data<T>();
input_vec = framework::vectorize<int>(transformed_input.dims());
std::vector<int> transformed_output_vec = output_vec;
for (size_t i = 0; i < data_dim; ++i) {
transformed_output_vec[i + 2] =
output_vec[i + 2] +
(input_pad[2 * i + 4] + input_pad[2 * i + 5]) * strides[i] -
2 * padding_common[i] + paddings[2 * i] + paddings[2 * i + 1];
}
Tensor transformed_output;
if (!is_sys_pad) {
DDim transformed_output_shape(
framework::make_ddim(transformed_output_vec));
transformed_output.mutable_data<T>(transformed_output_shape,
ctx.GetPlace());
} else {
output->mutable_data<T>(ctx.GetPlace());
transformed_output.ShareDataWith(*output);
transformed_output.Resize(framework::make_ddim(transformed_output_vec));
}
T* transformed_output_data = transformed_output.data<T>();
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedConvolutionDescriptor conv_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
// (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc =
input_desc.descriptor<T>(layout, input_vec, groups);
// (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc =
output_desc.descriptor<T>(layout, transformed_output_vec, groups);
// (M, C, K_h, K_w) or (M, C, K_d, K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()), groups);
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
// ------------------- cudnn conv workspace ---------------------
size_t workspace_size_in_bytes; // final workspace to allocate.
size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes;
if (user_workspace_size > 0) {
workspace_size_limit = user_workspace_size * 1024 * 1024;
}
// ------------------- cudnn conv algorithm ---------------------
cudnnConvolutionBwdDataAlgo_t algo;
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
// Get the algorithm
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardDataAlgorithm(
handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc,
// dxDesc: Handle to the previously initialized output tensor
// descriptor.
cudnn_output_desc,
CUDNN_CONVOLUTION_BWD_DATA_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &algo));
if (algo == 0 && FLAGS_cudnn_deterministic) {
algo = static_cast<cudnnConvolutionBwdDataAlgo_t>(1);
}
// get workspace size able to allocate
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardDataWorkspaceSize(
handle, cudnn_filter_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_output_desc, algo, &workspace_size_in_bytes));
// ------------------- cudnn conv transpose forward ---------------------
int input_offset =
transformed_input.numel() / transformed_input.dims()[0] / groups;
int output_offset =
transformed_output.numel() / transformed_output.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardData(
handle, &alpha, cudnn_filter_desc,
filter_data + filter_offset * g, cudnn_input_desc,
input_data + input_offset * g, cudnn_conv_desc, algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_output_desc,
transformed_output_data + output_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
if (!is_sys_pad && strides.size() == 2U) {
Slice<paddle::platform::CUDADeviceContext, T, 4>(
ctx, &transformed_output, output, starts, ends, axes);
} else if (!is_sys_pad && strides.size() == 3U) {
Slice<paddle::platform::CUDADeviceContext, T, 5>(
ctx, &transformed_output, output, starts, ends, axes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor output_transpose;
Tensor output_nchw;
output_nchw.ShareDataWith(*output);
output_nchw.Resize(framework::make_ddim(output_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &output_nchw, &output_transpose, axis);
*output = output_transpose;
}
}
}
};
template <typename T>
class CUDNNConvTransposeGradOpKernel : public framework::OpKernel<T> {
public:
void Compute(const framework::ExecutionContext& ctx) const override {
PADDLE_ENFORCE(platform::is_gpu_place(ctx.GetPlace()),
"It must use CUDAPlace.");
auto input = ctx.Input<Tensor>("Input");
auto filter = ctx.Input<Tensor>("Filter");
auto output_grad = ctx.Input<Tensor>(framework::GradVarName("Output"));
auto input_grad = ctx.Output<Tensor>(framework::GradVarName("Input"));
auto filter_grad = ctx.Output<Tensor>(framework::GradVarName("Filter"));
const T* filter_data = filter->data<T>();
std::vector<int> strides = ctx.Attr<std::vector<int>>("strides");
std::vector<int> paddings = ctx.Attr<std::vector<int>>("paddings");
// cudnn v5 does not support dilations
std::vector<int> dilations = ctx.Attr<std::vector<int>>("dilations");
int groups = ctx.Attr<int>("groups");
std::string padding_algorithm = ctx.Attr<std::string>("padding_algorithm");
int user_workspace_size = ctx.Attr<int>("workspace_size_MB");
const std::string data_layout_str = ctx.Attr<std::string>("data_format");
const paddle::operators::DataLayout data_layout =
(data_layout_str != "NHWC" ? DataLayout::kNCHW : DataLayout::kNHWC);
// if channel_last, transpose to channel_first
Tensor input_transpose;
Tensor output_grad_transpose;
std::vector<int> input_vec = framework::vectorize<int>(input->dims());
std::vector<int> output_vec =
framework::vectorize<int>(output_grad->dims());
if (data_layout == DataLayout::kNHWC) {
if (strides.size() == 2U) {
std::vector<int> axis = {0, 3, 1, 2};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 4>(ctx, input, &input_transpose, axis);
DataTranspose<T, 4>(ctx, output_grad, &output_grad_transpose, axis);
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 4, 1, 2, 3};
for (size_t i = 0; i < axis.size(); ++i) {
input_vec[i] = input->dims()[axis[i]];
output_vec[i] = output_grad->dims()[axis[i]];
}
DataTranspose<T, 5>(ctx, input, &input_transpose, axis);
DataTranspose<T, 5>(ctx, output_grad, &output_grad_transpose, axis);
}
} else {
input_transpose = *input;
output_grad_transpose = *output_grad;
}
// update padding and dilation
auto in_dims = input_transpose.dims();
auto filter_dims = filter->dims();
framework::DDim in_data_dims;
in_data_dims = framework::slice_ddim(in_dims, 2, in_dims.size());
framework::DDim filter_data_dims =
framework::slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = framework::vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(&paddings, &dilations, padding_algorithm,
in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = math::IsSymmetricPadding(paddings, data_dim);
std::vector<int> input_pad(input_transpose.dims().size() * 2, 0);
Tensor transformed_output_grad;
std::vector<int> padding_common(data_dim, 0);
if (!is_sys_pad) {
std::vector<int> padding_diff(data_dim);
std::vector<int> new_output_grad_shape_vec(data_dim + 2);
new_output_grad_shape_vec[0] = output_grad_transpose.dims()[0];
new_output_grad_shape_vec[1] = output_grad_transpose.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_output_grad_shape_vec[i + 2] =
output_grad_transpose.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
framework::DDim new_output_grad_shape(
framework::make_ddim(new_output_grad_shape_vec));
transformed_output_grad.Resize(new_output_grad_shape);
auto& dev_ctx =
ctx.template device_context<paddle::platform::CUDADeviceContext>();
transformed_output_grad =
ctx.AllocateTmpTensor<T, paddle::platform::CUDADeviceContext>(
new_output_grad_shape, dev_ctx);
const int rank = input_transpose.dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 4>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
case 5: {
math::PadFunction<paddle::platform::CUDADeviceContext, T, 5>(
ctx, input_pad, output_grad_transpose, pad_value,
&transformed_output_grad);
} break;
default:
PADDLE_ENFORCE_EQ(
rank == 4 || rank == 5, true,
"Op(ConvTranspose) only supports 4-D or 5-D input Tensor.");
}
} else {
transformed_output_grad = output_grad_transpose;
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* input_data = input_transpose.data<T>();
const T* output_grad_data = transformed_output_grad.data<T>();
output_vec = framework::vectorize<int>(transformed_output_grad.dims());
// ------------------- cudnn descriptors ---------------------
ScopedTensorDescriptor input_desc;
ScopedTensorDescriptor output_desc;
ScopedFilterDescriptor filter_desc;
ScopedConvolutionDescriptor conv_desc;
DataLayout layout;
if (strides.size() == 2U) {
layout = DataLayout::kNCHW;
} else {
layout = DataLayout::kNCDHW;
}
// Input: (N, M, H, W) or (N, M, D, H, W)
cudnnTensorDescriptor_t cudnn_input_desc =
input_desc.descriptor<T>(layout, input_vec, groups);
// Output: (N, C, O_h, O_w) or (N, C, O_d, O_h, O_w)
cudnnTensorDescriptor_t cudnn_output_desc =
output_desc.descriptor<T>(layout, output_vec, groups);
// Filter (M, C, K_h, K_w) or (M, C, K_d K_h, K_w)
cudnnFilterDescriptor_t cudnn_filter_desc = filter_desc.descriptor<T>(
layout, framework::vectorize<int>(filter->dims()), groups);
cudnnConvolutionDescriptor_t cudnn_conv_desc =
conv_desc.descriptor<T>(padding_common, strides, dilations);
// ------------------- cudnn backward algorithm ---------------------
cudnnConvolutionFwdAlgo_t data_algo;
cudnnConvolutionBwdFilterAlgo_t filter_algo;
size_t bwd_filter_ws_size, fwd_ws_size;
size_t workspace_size_in_bytes = 0;
size_t workspace_size_limit = kConvCUDNNWorkspaceLimitBytes;
if (user_workspace_size > 0) {
workspace_size_limit = user_workspace_size * 1024 * 1024;
}
auto& dev_ctx = ctx.template device_context<platform::CUDADeviceContext>();
auto handle = dev_ctx.cudnn_handle();
if (input_grad) {
// choose backward algorithm for data
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardAlgorithm(
handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_input_desc, CUDNN_CONVOLUTION_FWD_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &data_algo));
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionForwardWorkspaceSize(
handle, cudnn_output_desc, cudnn_filter_desc, cudnn_conv_desc,
cudnn_input_desc, data_algo, &fwd_ws_size));
workspace_size_in_bytes = std::max(workspace_size_in_bytes, fwd_ws_size);
}
if (filter_grad) {
// choose backward algorithm for filter
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardFilterAlgorithm(
handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_filter_desc,
CUDNN_CONVOLUTION_BWD_FILTER_SPECIFY_WORKSPACE_LIMIT,
workspace_size_limit, &filter_algo));
// get workspace for backwards filter algorithm
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnGetConvolutionBackwardFilterWorkspaceSize(
handle, cudnn_output_desc, cudnn_input_desc, cudnn_conv_desc,
cudnn_filter_desc, filter_algo, &bwd_filter_ws_size));
workspace_size_in_bytes =
std::max(workspace_size_in_bytes, bwd_filter_ws_size);
}
// ------------------- cudnn conv backward data ---------------------
// FIXME(typhoonzero): template type T may not be the same as cudnn call.
int input_offset = input->numel() / input->dims()[0] / groups;
int output_grad_offset = transformed_output_grad.numel() /
transformed_output_grad.dims()[0] / groups;
int filter_offset = filter->numel() / groups;
T alpha = static_cast<T>(1.0), beta = static_cast<T>(0.0);
auto workspace_handle = dev_ctx.cudnn_workspace_handle();
if (input_grad) {
T* input_grad_data = input_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset input_grad.
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionForward(
handle, &alpha, cudnn_output_desc,
output_grad_data + output_grad_offset * g, cudnn_filter_desc,
filter_data + filter_offset * g, cudnn_conv_desc, data_algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_input_desc, input_grad_data + input_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
if (data_layout == DataLayout::kNHWC) {
Tensor input_grad_transpose;
Tensor input_grad_nchw;
input_grad_nchw.ShareDataWith(*input_grad);
input_grad_nchw.Resize(framework::make_ddim(input_vec));
if (strides.size() == 2U) {
std::vector<int> axis = {0, 2, 3, 1};
DataTranspose<T, 4>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
} else if (strides.size() == 3U) {
std::vector<int> axis = {0, 2, 3, 4, 1};
DataTranspose<T, 5>(ctx, &input_grad_nchw, &input_grad_transpose,
axis);
*input_grad = input_grad_transpose;
}
}
}
// ------------------- cudnn conv backward filter ---------------------
if (filter_grad) {
T* filter_grad_data = filter_grad->mutable_data<T>(ctx.GetPlace());
// Because beta is zero, it is unnecessary to reset filter_grad.
// Gradient with respect to the filter
for (int g = 0; g < groups; g++) {
auto cudnn_func = [&](void* cudnn_workspace) {
PADDLE_ENFORCE_CUDA_SUCCESS(
platform::dynload::cudnnConvolutionBackwardFilter(
handle, &alpha, cudnn_output_desc,
output_grad_data + output_grad_offset * g, cudnn_input_desc,
input_data + input_offset * g, cudnn_conv_desc, filter_algo,
cudnn_workspace, workspace_size_in_bytes, &beta,
cudnn_filter_desc, filter_grad_data + filter_offset * g));
};
workspace_handle.RunFunc(cudnn_func, workspace_size_in_bytes);
}
}
}
};
} // namespace operators
} // namespace paddle
namespace ops = paddle::operators;
namespace plat = paddle::platform;
REGISTER_OP_KERNEL(conv2d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv2d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeOpKernel<plat::float16>,
ops::CUDNNConvTransposeOpKernel<float>,
ops::CUDNNConvTransposeOpKernel<double>);
REGISTER_OP_KERNEL(conv3d_transpose_grad, CUDNN, ::paddle::platform::CUDAPlace,
ops::CUDNNConvTransposeGradOpKernel<plat::float16>,
ops::CUDNNConvTransposeGradOpKernel<float>,
ops::CUDNNConvTransposeGradOpKernel<double>);
|
c56739d548eb1f03758ab5da8615e83cdf0ff6a3.hip | // !!! This is a file automatically generated by hipify!!!
#include <torch/types.h>
#include <hip/hip_runtime.h>
// cf. https://discuss.pytorch.org/t/error-when-building-an-extension/63317/7
// cf. https://discuss.pytorch.org/t/cuda-tensor-apply-in-extension-gives-undefined-symbol/56736/4
// #include <ATen/hip/HIPApplyUtils.cuh>
#include "CUDAApplyUtils.cuh"
// TORCH_CHECK replaces AT_CHECK in PyTorch 1,2, support 1.1 as well.
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#ifndef __CUDACC_EXTENDED_LAMBDA__
#error "please compile with --expt-extended-lambda"
#endif
namespace kernel {
#include "dsigmoid.h"
using at::cuda::CUDA_tensor_apply2;
using at::cuda::TensorArgType;
template <typename scalar_t>
void
d_sigmoid_kernel(
torch::Tensor &output,
const torch::Tensor &input
) {
CUDA_tensor_apply2<scalar_t,scalar_t>(
output, input,
[=] __host__ __device__ (scalar_t &out, const scalar_t &inp) {
d_sigmoid_func(out, inp);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly
);
}
} // namespace kernel
void
d_sigmoid_cuda(
torch::Tensor &output, const torch::Tensor &input
) {
auto in_arg = torch::TensorArg(input, "input", 0),
out_arg = torch::TensorArg(output, "output", 1);
torch::checkAllDefined("d_sigmoid_cuda", {in_arg, out_arg});
torch::checkAllSameGPU("d_sigmoid_cuda", {in_arg, out_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "d_sigmoid_cuda", [&] {
kernel::d_sigmoid_kernel<scalar_t>(output, input);
});
}
| c56739d548eb1f03758ab5da8615e83cdf0ff6a3.cu | #include <torch/types.h>
#include <cuda_runtime.h>
// cf. https://discuss.pytorch.org/t/error-when-building-an-extension/63317/7
// cf. https://discuss.pytorch.org/t/cuda-tensor-apply-in-extension-gives-undefined-symbol/56736/4
// #include <ATen/cuda/CUDAApplyUtils.cuh>
#include "CUDAApplyUtils.cuh"
// TORCH_CHECK replaces AT_CHECK in PyTorch 1,2, support 1.1 as well.
#ifndef TORCH_CHECK
#define TORCH_CHECK AT_CHECK
#endif
#ifndef __CUDACC_EXTENDED_LAMBDA__
#error "please compile with --expt-extended-lambda"
#endif
namespace kernel {
#include "dsigmoid.h"
using at::cuda::CUDA_tensor_apply2;
using at::cuda::TensorArgType;
template <typename scalar_t>
void
d_sigmoid_kernel(
torch::Tensor &output,
const torch::Tensor &input
) {
CUDA_tensor_apply2<scalar_t,scalar_t>(
output, input,
[=] __host__ __device__ (scalar_t &out, const scalar_t &inp) {
d_sigmoid_func(out, inp);
},
TensorArgType::ReadWrite, TensorArgType::ReadOnly
);
}
} // namespace kernel
void
d_sigmoid_cuda(
torch::Tensor &output, const torch::Tensor &input
) {
auto in_arg = torch::TensorArg(input, "input", 0),
out_arg = torch::TensorArg(output, "output", 1);
torch::checkAllDefined("d_sigmoid_cuda", {in_arg, out_arg});
torch::checkAllSameGPU("d_sigmoid_cuda", {in_arg, out_arg});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.scalar_type(), "d_sigmoid_cuda", [&] {
kernel::d_sigmoid_kernel<scalar_t>(output, input);
});
}
|
5504cd4d1ea49b4df561f79d5e89e700ce2bca32.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
void reduce(float * d_out, float * d_intermediate, float * d_in,
int size, bool usesSharedMemory)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
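    // e.g. the ARRAY_SIZE of 1 << 16 used in main() satisfies both:
    // 65536 is a multiple of 1024 and well below 1024 * 1024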
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (usesSharedMemory)
{
hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_intermediate, d_in);
}
else
{
hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0,
d_intermediate, d_in);
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
hipLaunchKernelGGL(( shmem_reduce_kernel), dim3(blocks), dim3(threads), threads * sizeof(float), 0,
d_out, d_intermediate);
}
else
{
hipLaunchKernelGGL(( global_reduce_kernel), dim3(blocks), dim3(threads), 0, 0,
d_out, d_intermediate);
}
}
int main(int argc, char **argv)
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
hipSetDevice(dev);
hipDeviceProp_t devProps;
if (hipGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
const int ARRAY_SIZE = 1 << 16;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float sum = 0.0f;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [-1.0f, 1.0f]
h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f);
sum += h_in[i];
}
// declare GPU memory pointers
float * d_in, * d_intermediate, * d_out;
// allocate GPU memory
hipMalloc((void **) &d_in, ARRAY_BYTES);
hipMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
hipMalloc((void **) &d_out, sizeof(float));
// transfer the input array to the GPU
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
int whichKernel = 0;
if (argc == 2) {
whichKernel = atoi(argv[1]);
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running global reduce\n");
hipEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
}
hipEventRecord(stop, 0);
break;
case 1:
printf("Running reduce with shared mem\n");
hipEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
}
hipEventRecord(stop, 0);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
hipEventSynchronize(stop);
float elapsedTime;
hipEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f; // 100 trials
// copy back the sum from GPU
float h_out;
hipMemcpy(&h_out, d_out, sizeof(float), hipMemcpyDeviceToHost);
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
hipFree(d_in);
hipFree(d_intermediate);
hipFree(d_out);
return 0;
}
| 5504cd4d1ea49b4df561f79d5e89e700ce2bca32.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
__global__ void global_reduce_kernel(float * d_out, float * d_in)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// do reduction in global mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
d_in[myId] += d_in[myId + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = d_in[myId];
}
}
__global__ void shmem_reduce_kernel(float * d_out, const float * d_in)
{
// sdata is allocated in the kernel call: 3rd arg to <<<b, t, shmem>>>
extern __shared__ float sdata[];
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
// load shared mem from global mem
sdata[tid] = d_in[myId];
__syncthreads(); // make sure entire block is loaded!
// do reduction in shared mem
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
d_out[blockIdx.x] = sdata[0];
}
}
void reduce(float * d_out, float * d_intermediate, float * d_in,
int size, bool usesSharedMemory)
{
// assumes that size is not greater than maxThreadsPerBlock^2
// and that size is a multiple of maxThreadsPerBlock
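    // e.g. the ARRAY_SIZE of 1 << 16 used in main() satisfies both:
    // 65536 is a multiple of 1024 and well below 1024 * 1024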
const int maxThreadsPerBlock = 1024;
int threads = maxThreadsPerBlock;
int blocks = size / maxThreadsPerBlock;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_intermediate, d_in);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_intermediate, d_in);
}
// now we're down to one block left, so reduce it
threads = blocks; // launch one thread for each block in prev step
blocks = 1;
if (usesSharedMemory)
{
shmem_reduce_kernel<<<blocks, threads, threads * sizeof(float)>>>
(d_out, d_intermediate);
}
else
{
global_reduce_kernel<<<blocks, threads>>>
(d_out, d_intermediate);
}
}
int main(int argc, char **argv)
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "error: no devices supporting CUDA.\n");
exit(EXIT_FAILURE);
}
int dev = 0;
cudaSetDevice(dev);
cudaDeviceProp devProps;
if (cudaGetDeviceProperties(&devProps, dev) == 0)
{
printf("Using device %d:\n", dev);
printf("%s; global mem: %dB; compute v%d.%d; clock: %d kHz\n",
devProps.name, (int)devProps.totalGlobalMem,
(int)devProps.major, (int)devProps.minor,
(int)devProps.clockRate);
}
const int ARRAY_SIZE = 1 << 16;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
float h_in[ARRAY_SIZE];
float sum = 0.0f;
for(int i = 0; i < ARRAY_SIZE; i++) {
// generate random float in [-1.0f, 1.0f]
h_in[i] = -1.0f + (float)rand()/((float)RAND_MAX/2.0f);
sum += h_in[i];
}
// declare GPU memory pointers
float * d_in, * d_intermediate, * d_out;
// allocate GPU memory
cudaMalloc((void **) &d_in, ARRAY_BYTES);
cudaMalloc((void **) &d_intermediate, ARRAY_BYTES); // overallocated
cudaMalloc((void **) &d_out, sizeof(float));
// transfer the input array to the GPU
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
int whichKernel = 0;
if (argc == 2) {
whichKernel = atoi(argv[1]);
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// launch the kernel
switch(whichKernel) {
case 0:
printf("Running global reduce\n");
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, false);
}
cudaEventRecord(stop, 0);
break;
case 1:
printf("Running reduce with shared mem\n");
cudaEventRecord(start, 0);
for (int i = 0; i < 100; i++)
{
reduce(d_out, d_intermediate, d_in, ARRAY_SIZE, true);
}
cudaEventRecord(stop, 0);
break;
default:
fprintf(stderr, "error: ran no kernel\n");
exit(EXIT_FAILURE);
}
cudaEventSynchronize(stop);
float elapsedTime;
cudaEventElapsedTime(&elapsedTime, start, stop);
elapsedTime /= 100.0f; // 100 trials
// copy back the sum from GPU
float h_out;
cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
printf("average time elapsed: %f\n", elapsedTime);
// free GPU memory allocation
cudaFree(d_in);
cudaFree(d_intermediate);
cudaFree(d_out);
return 0;
}
|
d211bb0ae622abfcb91cb1c9e3457210b54fd2e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/math_ops.h"
namespace caffe2 {
struct SqrCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
math::Sqr<T, CUDAContext>(n, x, y, device_context);
}
};
template <typename T>
__global__ void SignKernel(int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = (-T(1) * (x[i] < 0)) + (x[i] > 0);
}
}
struct SignCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
hipLaunchKernelGGL(( SignKernel),
dim3(CAFFE_GET_BLOCKS(n)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
device_context->cuda_stream(), n, x, y);
}
};
REGISTER_CUDA_OPERATOR(
Sqr,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SqrCUDAFunctor>);
REGISTER_CUDA_OPERATOR(
Sign,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SignCUDAFunctor>);
}
| d211bb0ae622abfcb91cb1c9e3457210b54fd2e3.cu | /**
* Copyright (c) 2016-present, Facebook, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/math_ops.h"
namespace caffe2 {
struct SqrCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
math::Sqr<T, CUDAContext>(n, x, y, device_context);
}
};
template <typename T>
__global__ void SignKernel(int n, const T* x, T* y) {
CUDA_1D_KERNEL_LOOP(i, n) {
y[i] = (-T(1) * (x[i] < 0)) + (x[i] > 0);
}
}
struct SignCUDAFunctor {
template <typename T>
inline void
operator()(const int n, const T* x, T* y, CUDAContext* device_context) {
SignKernel<<<
CAFFE_GET_BLOCKS(n),
CAFFE_CUDA_NUM_THREADS,
0,
device_context->cuda_stream()>>>(n, x, y);
}
};
REGISTER_CUDA_OPERATOR(
Sqr,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SqrCUDAFunctor>);
REGISTER_CUDA_OPERATOR(
Sign,
UnaryElementwiseOp<TensorTypes<float>, CUDAContext, SignCUDAFunctor>);
}
|
0296742571711307c210dcf2266e8f365d1ada59.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <fftw3.h>
#include <hipfft.h>
using namespace std;
#define M 8
#define N 4
int main() {
hipDoubleComplex *a, *d_a;
a = new hipDoubleComplex[2*M*N];
hipMalloc(&d_a, 2*M*N*sizeof(hipDoubleComplex));
for (int i=0; i<2*M*N; i++) {
a[i] = make_cuDoubleComplex(i+N, 0);
}
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
fftw_complex *fft_doppler_buffer;
fftw_plan fft_doppler_plan;
fft_doppler_buffer = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
fft_doppler_plan = fftw_plan_dft_1d(N, fft_doppler_buffer, fft_doppler_buffer, FFTW_FORWARD, FFTW_ESTIMATE);
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
fft_doppler_buffer[j][0] = a[i*N+j].x;
fft_doppler_buffer[j][1] = a[i*N+j].y;
}
fftw_execute(fft_doppler_plan);
for (int j=0; j<N; j++) {
a[i*N+j] = make_cuDoubleComplex(fft_doppler_buffer[j][0], fft_doppler_buffer[j][1]);
}
}
fftw_destroy_plan(fft_doppler_plan);
fftw_free(fft_doppler_buffer);
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
for (int i=0; i<2*M*N; i++) {
a[i] = make_cuDoubleComplex(i+N, 0);
}
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
hipMemcpy(d_a, a, 2*M*N*sizeof(hipDoubleComplex), hipMemcpyHostToDevice);
hipfftHandle handle;
int rank = 1; // --- 1D FFTs
int nn[] = { N }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = N, odist = N; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = 2*M; // --- Number of batched executions
// hipfftPlanMany(&handle, rank, nn,
// inembed, istride, idist,
// onembed, ostride, odist, HIPFFT_Z2Z, batch);
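    // hipfftPlan1d with a batch count of 2*M runs 2*M independent N-point
    // transforms over contiguous rows, matching the row-by-row FFTW loop above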
hipfftPlan1d(&handle, N, HIPFFT_Z2Z, 2*M);
hipfftExecZ2Z(handle, d_a, d_a, HIPFFT_FORWARD);
hipMemcpy(a, d_a, 2*M*N*sizeof(hipDoubleComplex), hipMemcpyDeviceToHost);
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
return 0;
}
| 0296742571711307c210dcf2266e8f365d1ada59.cu | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuComplex.h>
#include <fftw3.h>
#include <cufft.h>
using namespace std;
#define M 8
#define N 4
int main() {
cuDoubleComplex *a, *d_a;
a = new cuDoubleComplex[2*M*N];
cudaMalloc(&d_a, 2*M*N*sizeof(cuDoubleComplex));
for (int i=0; i<2*M*N; i++) {
a[i] = make_cuDoubleComplex(i+N, 0);
}
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
fftw_complex *fft_doppler_buffer;
fftw_plan fft_doppler_plan;
fft_doppler_buffer = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
fft_doppler_plan = fftw_plan_dft_1d(N, fft_doppler_buffer, fft_doppler_buffer, FFTW_FORWARD, FFTW_ESTIMATE);
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
fft_doppler_buffer[j][0] = a[i*N+j].x;
fft_doppler_buffer[j][1] = a[i*N+j].y;
}
fftw_execute(fft_doppler_plan);
for (int j=0; j<N; j++) {
a[i*N+j] = make_cuDoubleComplex(fft_doppler_buffer[j][0], fft_doppler_buffer[j][1]);
}
}
fftw_destroy_plan(fft_doppler_plan);
fftw_free(fft_doppler_buffer);
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
for (int i=0; i<2*M*N; i++) {
a[i] = make_cuDoubleComplex(i+N, 0);
}
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
cudaMemcpy(d_a, a, 2*M*N*sizeof(cuDoubleComplex), cudaMemcpyHostToDevice);
cufftHandle handle;
int rank = 1; // --- 1D FFTs
int nn[] = { N }; // --- Size of the Fourier transform
int istride = 1, ostride = 1; // --- Distance between two successive input/output elements
int idist = N, odist = N; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = 2*M; // --- Number of batched executions
// cufftPlanMany(&handle, rank, nn,
// inembed, istride, idist,
// onembed, ostride, odist, CUFFT_Z2Z, batch);
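    // cufftPlan1d with a batch count of 2*M runs 2*M independent N-point
    // transforms over contiguous rows, matching the row-by-row FFTW loop above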
cufftPlan1d(&handle, N, CUFFT_Z2Z, 2*M);
cufftExecZ2Z(handle, d_a, d_a, CUFFT_FORWARD);
cudaMemcpy(a, d_a, 2*M*N*sizeof(cuDoubleComplex), cudaMemcpyDeviceToHost);
cout << endl;
for (int i=0; i<2*M; i++) {
for (int j=0; j<N; j++) {
cout << "(" << a[i*N+j].x << "," << a[i*N+j].y << ") ";
}
cout << endl;
}
return 0;
}
|
3eeba92ff6ff0a3633a7c615d170bf45e017bfd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367H1 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2017 Bogdan Simion
* -------------
*/
#include "kernels.h"
__global__ void kernel2(const int8_t *filter, int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int32_t sum = 0;
__shared__ int32_t sdata_min[512];
__shared__ int32_t sdata_max[512];
unsigned int tid = threadIdx.x;
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
// int initial_off = dimension / 2;
int img_x = idx % width - dimension / 2;
int img_y = idx / width - dimension / 2;
if(idx < width * height){
for(int y = 0; y < dimension; y++){
for(int x = 0; x < dimension; x ++){
if(img_x >= 0 && img_x < width && img_y >= 0 && img_y < height){
int fil_pos = dimension * y + x;
int img_pos = width * img_y + img_x;
sum += input[img_pos] * filter[fil_pos];
}
img_x++;
}
img_y++;
img_x = idx % width - dimension / 2;
}
output[idx] = sum;
sdata_min[tid] = sum;
sdata_max[tid] = sum;
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
unsigned int blockSize = blockDim.x;
if (tid < 32) {
volatile int32_t* smem_max = sdata_max;
volatile int32_t* smem_min = sdata_min;
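      // the remaining steps stay within a single warp and are assumed to run in
      // lockstep, so no __syncthreads() is used; volatile prevents the
      // shared-memory values from being cached in registers between steps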
if (blockSize >= 64) {
if(smem_max[tid] < smem_max[tid + 32]){
smem_max[tid] = smem_max[tid + 32];
}
if(smem_min[tid + 32] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 32];
}
}
if (blockSize >= 32) {
if(smem_max[tid] < smem_max[tid + 16]){
smem_max[tid] = smem_max[tid + 16];
}
if(smem_min[tid + 16] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 16];
}
}
if (blockSize >= 16) {
if(smem_max[tid] < smem_max[tid + 8]){
smem_max[tid] = smem_max[tid + 8];
}
if(smem_min[tid + 8] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 8];
}
}
if (blockSize >= 8) {
if(smem_max[tid] < smem_max[tid + 4]){
smem_max[tid] = smem_max[tid + 4];
}
if(smem_min[tid + 4] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 4];
}
}
if (blockSize >= 4) {
if(smem_max[tid] < smem_max[tid + 2]){
smem_max[tid] = smem_max[tid + 2];
}
if(smem_min[tid + 2] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 2];
}
}
if (blockSize >= 2) {
if(smem_max[tid] < smem_max[tid + 1]){
smem_max[tid] = smem_max[tid + 1];
}
if(smem_min[tid + 1] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 1];
}
}
}
if (tid == 0) {
g_max[blockIdx.x] = sdata_max[0];
g_min[blockIdx.x] = sdata_min[0];
}
}
__global__ void normalize2(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (smallest != biggest && idx < width * height)
{
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
| 3eeba92ff6ff0a3633a7c615d170bf45e017bfd4.cu | /* ------------
* This code is provided solely for the personal and private use of
* students taking the CSC367H1 course at the University of Toronto.
* Copying for purposes other than this use is expressly prohibited.
* All forms of distribution of this code, whether as given or with
* any changes, are expressly prohibited.
*
* Authors: Bogdan Simion, Felipe de Azevedo Piovezan
*
* All of the files in this directory and all subdirectories are:
* Copyright (c) 2017 Bogdan Simion
* -------------
*/
#include "kernels.h"
__global__ void kernel2(const int8_t *filter, int32_t dimension,
const int32_t *input, int32_t *output, int32_t width, int32_t height, int32_t *g_max, int32_t *g_min)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
int32_t sum = 0;
__shared__ int32_t sdata_min[512];
__shared__ int32_t sdata_max[512];
unsigned int tid = threadIdx.x;
sdata_min[tid] = 999999;
sdata_max[tid] = -999999;
// int initial_off = dimension / 2;
int img_x = idx % width - dimension / 2;
int img_y = idx / width - dimension / 2;
if(idx < width * height){
for(int y = 0; y < dimension; y++){
for(int x = 0; x < dimension; x ++){
if(img_x >= 0 && img_x < width && img_y >= 0 && img_y < height){
int fil_pos = dimension * y + x;
int img_pos = width * img_y + img_x;
sum += input[img_pos] * filter[fil_pos];
}
img_x++;
}
img_y++;
img_x = idx % width - dimension / 2;
}
output[idx] = sum;
sdata_min[tid] = sum;
sdata_max[tid] = sum;
}
__syncthreads();
for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
if (tid < s) {
if(sdata_max[tid] < sdata_max[tid + s]){
sdata_max[tid] = sdata_max[tid + s];
}
if(sdata_min[tid + s] < sdata_min[tid]){
sdata_min[tid] = sdata_min[tid + s];
}
}
__syncthreads();
}
unsigned int blockSize = blockDim.x;
if (tid < 32) {
volatile int32_t* smem_max = sdata_max;
volatile int32_t* smem_min = sdata_min;
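      // the remaining steps stay within a single warp and are assumed to run in
      // lockstep, so no __syncthreads() is used; volatile prevents the
      // shared-memory values from being cached in registers between steps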
if (blockSize >= 64) {
if(smem_max[tid] < smem_max[tid + 32]){
smem_max[tid] = smem_max[tid + 32];
}
if(smem_min[tid + 32] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 32];
}
}
if (blockSize >= 32) {
if(smem_max[tid] < smem_max[tid + 16]){
smem_max[tid] = smem_max[tid + 16];
}
if(smem_min[tid + 16] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 16];
}
}
if (blockSize >= 16) {
if(smem_max[tid] < smem_max[tid + 8]){
smem_max[tid] = smem_max[tid + 8];
}
if(smem_min[tid + 8] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 8];
}
}
if (blockSize >= 8) {
if(smem_max[tid] < smem_max[tid + 4]){
smem_max[tid] = smem_max[tid + 4];
}
if(smem_min[tid + 4] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 4];
}
}
if (blockSize >= 4) {
if(smem_max[tid] < smem_max[tid + 2]){
smem_max[tid] = smem_max[tid + 2];
}
if(smem_min[tid + 2] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 2];
}
}
if (blockSize >= 2) {
if(smem_max[tid] < smem_max[tid + 1]){
smem_max[tid] = smem_max[tid + 1];
}
if(smem_min[tid + 1] < smem_min[tid]){
smem_min[tid] = smem_min[tid + 1];
}
}
}
if (tid == 0) {
g_max[blockIdx.x] = sdata_max[0];
g_min[blockIdx.x] = sdata_min[0];
}
}
__global__ void normalize2(int32_t *image, int32_t width, int32_t height,
int32_t smallest, int32_t biggest)
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (smallest != biggest && idx < width * height)
{
image[idx] = ((image[idx] - smallest) * 255) / (biggest - smallest);
}
}
|
2c80a168c69c67414b55b2ad59e42f81cd6b1e7d.hip | // !!! This is a file automatically generated by hipify!!!
#include "ProjHelperFun.cu.h"
///////////// GIVEN CODE FROM SLIDES project.pdf p 17. /////////////
///////////// assumes z is the outer dim and all matrices are same dims.
//Note that it does not make a check on the outer dim, so num threads of outer
//dim z must be precise!
void transpose2d(REAL* A, REAL** B, int M, int N) {
for(int i = 0 ; i < M ; i++) {
for(int j = 0 ; j < N ; j++) {
//(*B)[j*M+i] = A[i*N+j];
(*B)[i*N+j] = A[j*M+i];
}
}
}
/////////////////////////// COPY METHODS ///////////////////////////
void globToDevice(PrivGlobs* globs, unsigned outer, unsigned size,
REAL** d_out, int type){
unsigned mem_size = outer*size*sizeof(REAL);
REAL* tmp = (REAL*) malloc(mem_size);
hipMalloc((void**)d_out, mem_size);
if(type == 1){ //myX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myX[i];
}
}
}
if(type == 2){ //myY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myY[i];
}
}
}
if(type == 3){ //myTimeline
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){ //2d works even though 3d
tmp[idx2d(j,i,size)] = globs[j].myTimeline[i];
}
}
}
if(type == 4){ //myResult
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myResult[i];
}
}
}
if(type == 5){ //myVarX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myVarX[i];
}
}
}
if(type == 6){ //myVarY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myVarY[i];
}
}
}
if(type == 7){ //myDxx
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myDxx[i];
}
}
}
if(type == 8){ //myDyy
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myDyy[i];
}
}
}
hipMemcpy(*d_out, tmp, mem_size, hipMemcpyHostToDevice);
free(tmp);
}
//frees d_in
void globFromDevice(PrivGlobs* globs, unsigned outer, unsigned size,
REAL* d_in, int type){
unsigned mem_size = outer*size*sizeof(REAL);
REAL* tmp = (REAL*) malloc(mem_size);
hipMemcpy(tmp, d_in, mem_size, hipMemcpyDeviceToHost);
if(type == 1){ //myX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myX[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 2){ //myY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myY[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 3){ //myTimeline
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myTimeline[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 4){ //myResult
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myResult[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 5){ //myVarX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myVarX[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 6){ //myVarY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myVarY[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 7){ //myDxx
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myDxx[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 8){ //myDyy
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myDyy[i] = tmp[idx2d(j,i,size)];
}
}
}
free(tmp);
hipFree(d_in);
} | 2c80a168c69c67414b55b2ad59e42f81cd6b1e7d.cu | #include "ProjHelperFun.cu.h"
///////////// GIVEN CODE FROM SLIDES project.pdf p 17. /////////////
///////////// assumes z is the outer dim and all matrices are same dims.
//Note that it does not make a check on the outer dim, so num threads of outer
//dim z must be precise!
void transpose2d(REAL* A, REAL** B, int M, int N) {
for(int i = 0 ; i < M ; i++) {
for(int j = 0 ; j < N ; j++) {
//(*B)[j*M+i] = A[i*N+j];
(*B)[i*N+j] = A[j*M+i];
}
}
}
/////////////////////////// COPY METHODS ///////////////////////////
void globToDevice(PrivGlobs* globs, unsigned outer, unsigned size,
REAL** d_out, int type){
unsigned mem_size = outer*size*sizeof(REAL);
REAL* tmp = (REAL*) malloc(mem_size);
cudaMalloc((void**)d_out, mem_size);
if(type == 1){ //myX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myX[i];
}
}
}
if(type == 2){ //myY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myY[i];
}
}
}
if(type == 3){ //myTimeline
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){ //2d works even though 3d
tmp[idx2d(j,i,size)] = globs[j].myTimeline[i];
}
}
}
if(type == 4){ //myResult
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myResult[i];
}
}
}
if(type == 5){ //myVarX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myVarX[i];
}
}
}
if(type == 6){ //myVarY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myVarY[i];
}
}
}
if(type == 7){ //myDxx
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myDxx[i];
}
}
}
if(type == 8){ //myDyy
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
tmp[idx2d(j,i,size)] = globs[j].myDyy[i];
}
}
}
cudaMemcpy(*d_out, tmp, mem_size, cudaMemcpyHostToDevice);
free(tmp);
}
//frees d_in
void globFromDevice(PrivGlobs* globs, unsigned outer, unsigned size,
REAL* d_in, int type){
unsigned mem_size = outer*size*sizeof(REAL);
REAL* tmp = (REAL*) malloc(mem_size);
cudaMemcpy(tmp, d_in, mem_size, cudaMemcpyDeviceToHost);
if(type == 1){ //myX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myX[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 2){ //myY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myY[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 3){ //myTimeline
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myTimeline[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 4){ //myResult
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myResult[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 5){ //myVarX
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myVarX[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 6){ //myVarY
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myVarY[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 7){ //myDxx
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myDxx[i] = tmp[idx2d(j,i,size)];
}
}
}
if(type == 8){ //myDyy
for(unsigned j=0; j<outer;j++){
for(unsigned i=0; i<size; i++){
globs[j].myDyy[i] = tmp[idx2d(j,i,size)];
}
}
}
free(tmp);
cudaFree(d_in);
} |
7dded521cd6be8f5898f91c6749572d6bee11796.hip | // !!! This is a file automatically generated by hipify!!!
/**
 ! nvcc RGB2Gray.cu -o main `pkg-config opencv --cflags --libs`
 ! ldd main // check which shared libraries are missing
 ! cp -r /usr/local/lib/libopencv* /lib // copy the libraries directly to /lib
 */
#include <iostream>
#include <hip/hip_runtime.h>
// #include <hip/hip_runtime.h>
// #include <hip/hip_runtime_api.h>
#include <cmath>
#include <ctime>
#include <opencv2/opencv.hpp>
#include "common/book.h"
// #include "common/image.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: 1D block and 2D grid <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
#define get_ptr(image) ((unsigned char*)image.data)
#define image_size(image) (image.cols * image.rows * 3)
using namespace std;
using namespace cv;
typedef float FLOAT;
__device__
unsigned char FLOAT2uchar(FLOAT value)
{
if(value < 0)
value = 0;
else if(value > 255)
value = 255;
return (unsigned char)value;
//return saturate_cast<unsigned char>(value);
}
__global__ void rgb2gray(unsigned char *dev_RGBImg,unsigned char *dev_gray)
{
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
unsigned char R=dev_RGBImg[offset*3+0];
unsigned char G=dev_RGBImg[offset*3+1];
unsigned char B=dev_RGBImg[offset*3+2];
// dev_gray[offset]=(unsigned char) (R*0.299+G*0.587+B*0.114);
dev_gray[offset]=FLOAT2uchar(R*0.299+G*0.587+B*0.114);
}
int main(int argc,char* argv[])
{
mycout<<"RGB2Gray\n"<<
": ./main xxxx.jpg xxxx.jpg"<<endl;
if(argc<3) return -1;
    // open the image
Mat img = imread(argv[1], IMREAD_COLOR);
if (img.empty())
{
mycout <<"load image fail"<<endl;
return -1;
}
    // create an empty Mat
    Mat gray=Mat::zeros(img.size(),CV_8UC1); // a grayscale image has only one channel
    // convert from BGR --> RGB (CV_8UC3)
Mat RGBImg;
    cvtColor( img, RGBImg, COLOR_BGR2RGB);// opencv loads images as BGR by default
    // get the address of the first element (first element of the first row)
unsigned char* host_RGBImg=get_ptr(RGBImg); // unsigned char* (uchar)
unsigned char* host_gray=get_ptr(gray);
    unsigned char *dev_RGBImg; // GPU variable
    unsigned char *dev_gray; // GPU variable
    // allocate memory for the GPU variables
HANDLE_ERROR( hipMalloc( (void**)&dev_RGBImg,image_size(RGBImg) ) );
HANDLE_ERROR( hipMalloc( (void**)&dev_gray,image_size(gray)/3 ) );
HANDLE_ERROR( hipMemcpy( dev_RGBImg,host_RGBImg,image_size(RGBImg) ,hipMemcpyHostToDevice ) );
    // launch the kernel
dim3 grid(RGBImg.rows,RGBImg.cols);
hipLaunchKernelGGL(( rgb2gray), dim3(grid),dim3(1), 0, 0, dev_RGBImg,dev_gray);
HANDLE_ERROR( hipMemcpy( host_gray,dev_gray,image_size(gray)/3,hipMemcpyDeviceToHost ) );
    // save the result
imwrite(argv[2],gray);
    // free device memory
HANDLE_ERROR(hipFree(dev_RGBImg));
HANDLE_ERROR(hipFree(dev_gray));
return 0;
}
| 7dded521cd6be8f5898f91c6749572d6bee11796.cu | /**
! nvcc RGB2Gray.cu -o main `pkg-config opencv --cflags --libs`
! ldd main // check which shared libraries are missing
! cp -r /usr/local/lib/libopencv* /lib // copy the libraries directly to /lib
*/
#include <iostream>
#include <cuda.h>
// #include <cuda_runtime.h>
// #include <cuda_runtime_api.h>
#include <cmath>
#include <ctime>
#include <opencv2/opencv.hpp>
#include "common/book.h"
// #include "common/image.h"
#define mycout cout<<"["<<__FILE__<<":"<<__LINE__<<"] "
/* global thread id: 1D block and 2D grid <<<(32,32),32>>> */
#define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x) // 2D grid,1D block
// #define get_tid() (blockDim.x * (blockIdx.x + blockIdx.y * gridDim.x) + threadIdx.x+threadIdx.y*blockDim.x) // 2D grid,2D block
/* get block id: 2D grid */
#define get_bid() (blockIdx.x + blockIdx.y * gridDim.x)
/* thread id within each block */
// #define get_tid_per_block() (threadIdx.x+threadIdx.y*blockDim.x) // 2D block
#define get_tid_per_block() (threadIdx.x)
#define get_ptr(image) ((unsigned char*)image.data)
#define image_size(image) (image.cols * image.rows * 3)
using namespace std;
using namespace cv;
typedef float FLOAT;
__device__
unsigned char FLOAT2uchar(FLOAT value)
{
if(value < 0)
value = 0;
else if(value > 255)
value = 255;
return (unsigned char)value;
//return saturate_cast<unsigned char>(value);
}
__global__ void rgb2gray(unsigned char *dev_RGBImg,unsigned char *dev_gray)
{
// map from blockIdx to pixel position
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
unsigned char R=dev_RGBImg[offset*3+0];
unsigned char G=dev_RGBImg[offset*3+1];
unsigned char B=dev_RGBImg[offset*3+2];
// dev_gray[offset]=(unsigned char) (R*0.299+G*0.587+B*0.114);
dev_gray[offset]=FLOAT2uchar(R*0.299+G*0.587+B*0.114);
}
int main(int argc,char* argv[])
{
mycout<<"RGB2Gray\n"<<
"输入格式: ./main xxxx.jpg xxxx.jpg"<<endl;
if(argc<3) return -1;
    // open the image
Mat img = imread(argv[1], IMREAD_COLOR);
if (img.empty())
{
mycout <<"load image fail"<<endl;
return -1;
}
    // create an empty Mat
    Mat gray=Mat::zeros(img.size(),CV_8UC1); // a grayscale image has only one channel
    // convert from BGR --> RGB (CV_8UC3)
Mat RGBImg;
    cvtColor( img, RGBImg, COLOR_BGR2RGB);// opencv loads images as BGR by default
    // get the address of the first element (first element of the first row)
unsigned char* host_RGBImg=get_ptr(RGBImg); // unsigned char* (uchar)
unsigned char* host_gray=get_ptr(gray);
    unsigned char *dev_RGBImg; // GPU variable
    unsigned char *dev_gray; // GPU variable
    // allocate memory for the GPU variables
HANDLE_ERROR( cudaMalloc( (void**)&dev_RGBImg,image_size(RGBImg) ) );
HANDLE_ERROR( cudaMalloc( (void**)&dev_gray,image_size(gray)/3 ) );
HANDLE_ERROR( cudaMemcpy( dev_RGBImg,host_RGBImg,image_size(RGBImg) ,cudaMemcpyHostToDevice ) );
    // launch the kernel
dim3 grid(RGBImg.rows,RGBImg.cols);
rgb2gray<<<grid,1>>>(dev_RGBImg,dev_gray);
HANDLE_ERROR( cudaMemcpy( host_gray,dev_gray,image_size(gray)/3,cudaMemcpyDeviceToHost ) );
    // save the result
imwrite(argv[2],gray);
    // free device memory
HANDLE_ERROR(cudaFree(dev_RGBImg));
HANDLE_ERROR(cudaFree(dev_gray));
return 0;
}
|
02d3b694952ae19747fc1531aa34502631cf7b45.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <rocblas.h>
#include <math.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
extern const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
extern int cublas_init() {
hipblasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
extern int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
FILE * pFile;
pFile = fopen (cudamatpath,"r");
for (int i = 0; i < NUM_RND_STREAMS; i++) {
fscanf (pFile, "%u", &host_mults[i]);
}
fclose (pFile);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
//hipMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//hipMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//hipMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
extern void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = hipblasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
extern int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
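// Creates a non-owning column view: target aliases source's device memory for
// columns [first_col, last_col) and sets owns_data = 0, so nothing is copied.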
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
extern void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
extern void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulli), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulliTanh), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSamplePoisson), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbEnergy), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbProb), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomDropout), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddColMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
    if (!mat1->on_device || !mat2->on_device || !target->on_device)
        return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
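        /* The w columns are laid out on a roughly square w1 x w2 grid of
           32-thread blocks (one block per column), since a single grid
           dimension may be too small to hold w blocks on older devices.
           The other columnwise reductions and the softmax routines below
           reuse this same decomposition. */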
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxAndAccumulate), dim3(gridDim),dim3(32), 0, 0, mat->data_device, acc->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kArgMaxColumnwise), dim3(gridDim),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSqSumColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kNormLimitColumnwise), dim3(gridDim),dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, norm, w, h);
hipDeviceSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyCos), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySin), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len, tiny);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCeil), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kFloor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, tiny);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropyBernoulli), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, tiny);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCorrectPreds), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, cutoff);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
hipDeviceSynchronize();
return 0;
}
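// Illustrative host-side sketch of driving dot(); this is not part of the
// library and a_data, b_data, m, k, n are placeholders:
//
//     cudamat A, B, C;
//     float* c_host = (float*)malloc(m * n * sizeof(float));
//     init_from_array(&A, a_data, m, k);   // column-major host data
//     init_from_array(&B, b_data, k, n);
//     init_from_array(&C, c_host, m, n);
//     copy_to_device(&A); copy_to_device(&B); copy_to_device(&C);
//     dot(&A, &B, &C, 0., 1.);             // C = 0*C + 1*A*B on the GPU
//     copy_to_host(&C);                    // result is now in c_host
//
// dot() computes target = beta*target + alpha*mat1*mat2 and assumes
// column-major storage with size[0] as the leading dimension.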
extern int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
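    // mat1 is stored in CSR form: data_device.data holds the nnz values,
    // data_device.indices the column index of each value, and
    // data_device.indptr the rows + 1 row offsets that kSparseDot walks.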
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kSparseDot), dim3(grid), dim3(threads), 0, 0, m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
    hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
return 0;
}
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
    if (!mat1->on_device || !mat2->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
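// add_mult is a thin wrapper around a single SAXPY call, which makes it the
// natural primitive for in-place updates such as param += alpha * gradient.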
extern int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddMultSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, len, mult);
    hipDeviceSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
return 0;
}
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSinDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCosDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogisticDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kTanhDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearSmoothDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return -1.;
    }
    float res = hipblasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
hipLaunchKernelGGL(( kSwapColumns), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kGenerateTranslationsBigVarOff), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kBlockify), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMax), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxOverwrite), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrect), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_blosum90(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxBlosum90), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAccumulateColumns), dim3(h), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, tiny);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpand), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, indices->data_device, target->data_device, h, w, w2);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpandAndAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
hipDeviceSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
| 02d3b694952ae19747fc1531aa34502631cf7b45.cu | #include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "cudamat_kernels.cuh"
#include "cudamat.cuh"
extern "C" {
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
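/* Convention used throughout this file: wrappers return 0 on success and a
   nonzero code (CUDA_ERROR, CUBLAS_ERROR, ERROR_*) on failure. The two helpers
   above query and clear the pending error state of CUBLAS and the CUDA runtime
   respectively, so they must be called immediately after the operation they
   are meant to check. */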
extern const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
extern int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
extern int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
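/* init_random loads NUM_RND_STREAMS multiplier constants from the text file at
   cudamatpath, uploads them to the device next to the per-stream state words,
   and calls kSeedRandom so that fill_with_rand, fill_with_randn and the
   sampling routines below can draw random numbers on the device. */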
extern int init_random(rnd_struct* rnd_state, int seed, char* cudamatpath) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
    FILE * pFile;
    pFile = fopen (cudamatpath,"r");
    if (pFile == NULL) {
        free(host_mults);
        return ERROR_GENERIC;
    }
    for (int i = 0; i < NUM_RND_STREAMS; i++) {
        fscanf (pFile, "%u", &host_mults[i]);
    }
    fclose (pFile);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
    cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
    cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
    free(host_mults);
//cudaMalloc((void **)&rnd_state->dev_mults, NUM_RND_STREAMS * sizeof(unsigned int));
//cudaMalloc((void **)&rnd_state->dev_words, NUM_RND_STREAMS * sizeof(unsigned long long));
//cudaMemcpy(rnd_state->dev_mults, host_mults, NUM_RND_STREAMS * sizeof(unsigned int), cudaMemcpyHostToDevice);
cudaThreadSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
extern int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
extern int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
extern void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
extern void cuda_sync_threads() {
cudaThreadSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
extern int allocate_device_memory(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
extern int copy_to_host(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
extern int copy_to_device(cudamat* mat) {
int len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
extern int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
extern int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
extern int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
extern int reshape(cudamat* mat, unsigned int m, unsigned int n) {
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
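/* get_slice and get_vector_slice below return zero-copy views: the target
   aliases a contiguous block of the source's device memory and is marked
   owns_data = 0, so free_device_memory on the view never releases the
   underlying storage. */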
extern int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
return 0;
}
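/* Note: get_slice returns a non-owning view. target->data_device aliases the
   source's column-major storage starting at column first_col, and owns_data is
   left at 0, so free_device_memory() will not release it; the slice is only
   valid for as long as the source's device memory is. */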
extern int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
extern void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
}
extern void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
extern void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
extern int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
return allocate_device_memory(mat);
}
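/* Minimal host-side usage sketch (illustrative only, not part of the library).
   It assumes a copy_to_device() helper is defined earlier in this file, as in
   stock cudamat; the variable names below are hypothetical.

     cudamat a, b;
     float host_a[6] = {1, 2, 3, 4, 5, 6};
     init_from_array(&a, host_a, 2, 3);   // wrap existing host data (column-major)
     copy_to_device(&a);                  // assumed helper: allocate + upload
     init_empty(&b, 2, 3);                // uninitialized device-side matrix
     copy_on_device(&a, &b);              // b <- a, entirely on the GPU
*/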
/* ------------------------------ Random number generation ------------------------------ */
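/* The samplers in this section all launch a fixed NUM_RND_BLOCKS x
   NUM_RND_THREADS_PER_BLOCK grid and pass the per-thread generator state held
   in rnd_state (dev_mults, dev_words); each kernel covers all
   len = rows * cols elements of the matrix with that fixed grid. */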
extern int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
extern int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
extern int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddColMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
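/* max_by_axis and the other column-wise kernels below (argmax, sqsum,
   normlimit, softmax, ...) launch one 32-thread block per column and factor
   the grid as w1 x w2 with w1 = floor(sqrt(w)), presumably to keep each grid
   dimension within hardware limits for very wide matrices. */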
extern int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h);
cudaThreadSynchronize();
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, tiny);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int compute_cross_entropy(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (get_leading_dimension(mat1) != get_leading_dimension(target) ||
get_nonleading_dimension(mat2) != get_nonleading_dimension(target) ||
get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int m = get_leading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2);
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
cudaThreadSynchronize();
return 0;
}
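/* dot() maps directly onto cublasSgemm: matrices are stored column-major, the
   leading dimension passed for each operand is its size[0] (rows of the stored
   array), and get_transpose_char() supplies the transpose flag from is_trans,
   so the call computes target = beta * target + alpha * op(mat1) * op(mat2). */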
extern int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
return 0;
}
extern float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
    if (!mat1->on_device || !mat2->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return 0;
    }
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
extern int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
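/* Typical use (illustrative only; the variable names are hypothetical): a
   gradient step w <- w - eps * grad can be written as
     add_mult(&w, &grad, -eps);
   which maps directly onto the cublasSaxpy call above. */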
extern int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);
    cudaThreadSynchronize();
    if (checkCUDAError())
        return CUDA_ERROR;
return 0;
}
extern int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
extern int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSinDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCosDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern float euclid_norm(cudamat* mat, int* err_code) {
    int len = mat->size[0]*mat->size[1];
    if (!mat->on_device) {
        *err_code = ERROR_NOT_ON_DEVICE;
        return -1.;
    }
    float res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
extern int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
kSwapColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kGenerateTranslationsBigVarOff<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kBlockify<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMax<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
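/* softmax() and softmax_overwrite() operate column-wise: each 32-thread block
   normalizes one column of the h x w matrix, using the 32 floats of shared
   memory declared above for the intra-block max/sum reductions. */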
extern int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxOverwrite<<<gridDim, 32, shared_mem_size>>>(mat->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_blosum90(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxBlosum90<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
extern int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpand<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, indices->data_device, target->data_device, h, w, w2);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
extern int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
cudaThreadSynchronize();
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
}
|
83cdc4dfdb0990db2091c1da3b34736424f8f167.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Histogram code with partial reductions and atomicAdd function
*/
#include <stdio.h>
#include <cutil_inline.h>
#define THREADBLOCK_SIZE 32
#define BIN_COUNT 64
uint GlobalSize = 10000;
uint histogramCount = 0;
unsigned int time_GPU = 0;
unsigned int time_CPU = 0;
int global_atomic = 0;
//Device code
int* h_A;
int* d_A;
int* d_PartialHistograms;
int* d_Histogram;
int* h_Out;
int* h_Timer;
int* d_Timer;
void ParseArguments(int, char**);
void Cleanup(void);
__global__ void histogram (int *d_PartialHistograms, int *d_Data, int dataCount, int* timer)
{
//Shared memory
__shared__ int s_Hist[THREADBLOCK_SIZE * BIN_COUNT];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//int tid = thread
clock_t start_block;
clock_t stop_block;
if(threadIdx.x == 0) start_block = clock();
 // Zero this thread's private sub-histogram (BIN_COUNT bins per thread).
 for(int i = 0; i < BIN_COUNT; i++)
 s_Hist[threadIdx.x * BIN_COUNT + i] = 0;
int THREAD_N = blockDim.x * gridDim.x;
for (int pos=tid; pos < dataCount; pos = pos + THREAD_N)
{
//int data = d_Data[pos];
++s_Hist[d_Data[pos]+threadIdx.x*BIN_COUNT] ;
}
__syncthreads();
 // Write this thread's private sub-histogram out to global memory (one per thread).
 for(int i = 0; i < BIN_COUNT; i++)
 {
 d_PartialHistograms[tid*BIN_COUNT + i] = s_Hist[threadIdx.x*BIN_COUNT + i];
 // atomicAdd(&d_PartialHistograms[blockIdx.x*BIN_COUNT + i], s_Hist[threadIdx.x*32 +i]);
 }
if(threadIdx.x==0)
{
stop_block = clock();
timer[2*blockIdx.x] = start_block;
timer[2*blockIdx.x + 1]=stop_block;
}
}
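/* Each thread owns a private BIN_COUNT-bin sub-histogram in shared memory, so
   no atomics are needed while counting; the input is consumed with a
   grid-stride loop, and every thread then writes its sub-histogram to
   d_PartialHistograms for the merge pass below. Thread 0 of each block records
   per-block start/stop clock() ticks in timer[]. */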
__global__ void mergeHistogram (int *d_Histogram, int *d_PartialHistograms, int histogramCount) {
__shared__ int data[THREADBLOCK_SIZE];
int sum = 0;
for(int i=threadIdx.x; i<histogramCount; i += THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i*BIN_COUNT];
data[threadIdx.x] = sum;
for(int stride = THREADBLOCK_SIZE/2;stride>0; stride >>= 1){
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
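/* mergeHistogram is launched with one block per bin (gridDim.x == BIN_COUNT):
   each thread accumulates that bin across the partial histograms with a stride
   of THREADBLOCK_SIZE, the per-thread sums are then combined by a shared-memory
   tree reduction, and thread 0 writes the final count for its bin. */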
__global__ void histogram_atomic_kernel(int* d_PartialHistograms, int* d_Data, int dataCount, int* timer)
{
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
clock_t start_atomic;
clock_t stop_atomic;
// Shared memory size declared at kernel launch
extern __shared__ int s_Hist[];
if(tid == 0)
{
start_atomic = clock();
}
// Initializing histogram
for(int i = 0; i< BIN_COUNT; i++)
{
s_Hist[tid*BIN_COUNT+i]=0;
}
// Filling the histogram array
for(int pos=gid; pos < dataCount; pos += stride)
{
s_Hist[tid*BIN_COUNT+d_Data[pos]]++;
}
__syncthreads();
for(int thread_hist = 0; thread_hist < blockDim.x; thread_hist++)
{
atomicAdd(&d_PartialHistograms[tid],s_Hist[thread_hist*BIN_COUNT+tid]);
atomicAdd(&d_PartialHistograms[tid+blockDim.x],s_Hist[thread_hist*BIN_COUNT+tid+blockDim.x]);
}
if(tid == 0)
{
stop_atomic = clock();
timer[blockIdx.x] = stop_atomic - start_atomic;
}
}
int main (int argc, char** argv)
{
ParseArguments(argc, argv);
int N = GlobalSize;
int AtomicCheck = global_atomic;
printf("Histogram Size %d\n", N);
if(AtomicCheck)
printf("Using Atomic add\n");
size_t size = N*sizeof(int);
int sharedMemSize = THREADBLOCK_SIZE*BIN_COUNT*sizeof(int);
size_t atomic_hist_size = sizeof(int)*BIN_COUNT;
histogramCount = (GlobalSize+31)/THREADBLOCK_SIZE;
int result[BIN_COUNT];
int timer_size = 2*histogramCount*sizeof(int);
h_A = (int*)(malloc(size));
h_Timer = (int *)malloc(timer_size);
//histogramCount = (GlobalSize+31)/THREADBLOCK_SIZE;
//hipEvent_t start_cpu, stop_cpu, start_gpu, stop_gpu;
// float time_cpu = 0.0, time_gpu = 0.0;
//Create timers
cutilCheckError(cutCreateTimer(&time_GPU));
cutilCheckError(cutCreateTimer(&time_CPU));
/*hipEventCreate(&start_gpu);
hipEventCreate(&stop_gpu);
hipEventCreate(&start_cpu);
hipEventCreate(&stop_cpu);
*/
srand(1);
for (int i = 0; i < GlobalSize; ++i)
h_A[i] = rand()%BIN_COUNT;
//hipEventRecord(start_cpu,0);
cutilCheckError(cutStartTimer(time_CPU));
for (int i = 0; i < BIN_COUNT; i++)
result[i] = 0;
for (int i = 0; i < N; i++)
result[h_A[i]]++;
//hipEventRecord(stop_cpu,0);
//hipEventSynchronize(stop_cpu);
cutilCheckError(cutStopTimer(time_CPU));
size_t partialsize = (histogramCount*THREADBLOCK_SIZE*BIN_COUNT)*sizeof(int);
size_t newsize = (histogramCount * BIN_COUNT)*sizeof(int);
if(AtomicCheck){
h_Out = (int*)(malloc(atomic_hist_size));
}
else{
h_Out = (int*)(malloc(partialsize));
}
printf("Allocate h_Out");
cutilSafeCall(hipMalloc((void**)&d_A, size));
if(AtomicCheck){
cutilSafeCall(hipMalloc((void**)&d_PartialHistograms, atomic_hist_size));
cutilSafeCall( hipMemset(d_PartialHistograms, 0, atomic_hist_size));
}
else{
cutilSafeCall(hipMalloc((void**)&d_PartialHistograms, partialsize));
cutilSafeCall( hipMemset(d_PartialHistograms, 0, partialsize));
}
cutilSafeCall(hipMalloc((void**)&d_Histogram, newsize));
cutilSafeCall( hipMalloc((void**)&d_Timer, timer_size) );
printf("\nDevice allocated\n");
cutilSafeCall(hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice));
printf("Memcpy done\n");
printf("HC %d ThreadSize %d \n", histogramCount, THREADBLOCK_SIZE);
//hipEventRecord(start_gpu,0);
if(!AtomicCheck){
cutilCheckError(cutStartTimer(time_GPU));
hipLaunchKernelGGL(( histogram), dim3(histogramCount), dim3(THREADBLOCK_SIZE), 0, 0, d_PartialHistograms, d_A, GlobalSize, d_Timer);
//hipEventRecord(stop_gpu,0);
//hipEventSynchronize(stop_gpu);
cutilCheckMsg("kernel launch failure");
cutilSafeCall( hipDeviceSynchronize() ); // Have host wait for kernel
cutilCheckError(cutStopTimer(time_GPU));
//cutilSafeCall(hipMemcpy(h_Out, d_PartialHistograms, partialsize, hipMemcpyDeviceToHost));
//cutilSafeCall(hipMemcpy(h_Timer, d_Timer, timer_size, hipMemcpyDeviceToHost));
hipLaunchKernelGGL(( mergeHistogram), dim3(BIN_COUNT), dim3(THREADBLOCK_SIZE), 0, 0, d_Histogram,d_PartialHistograms,histogramCount);
cutilSafeCall( hipDeviceSynchronize() ); // Have host wait for kernel
}
else{
cutilCheckError(cutStartTimer(time_GPU));
hipLaunchKernelGGL(( histogram_atomic_kernel), dim3(histogramCount), dim3(THREADBLOCK_SIZE), sharedMemSize, 0, d_PartialHistograms, d_A, GlobalSize, d_Timer);
cutilSafeCall( hipDeviceSynchronize() ); // Have host wait for kernel
cutilCheckError(cutStopTimer(time_GPU));
}
if(!AtomicCheck){
cutilSafeCall(hipMemcpy(h_Out, d_PartialHistograms, partialsize, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(h_Timer, d_Timer, timer_size, hipMemcpyDeviceToHost));
}
else{
cutilSafeCall(hipMemcpy(h_Out, d_PartialHistograms, atomic_hist_size, hipMemcpyDeviceToHost));
cutilSafeCall(hipMemcpy(h_Timer, d_Timer, timer_size, hipMemcpyDeviceToHost));
}
int gpuresult[BIN_COUNT]={0};
for(int i=0; i<BIN_COUNT; i++)
{
if(!AtomicCheck){
for(int j=0;j<histogramCount*THREADBLOCK_SIZE;j++)
{
gpuresult[i] = gpuresult[i] + h_Out[j*BIN_COUNT + i];
// gpuresult[i] = h_Out[i];
// printf(" %d ", h_Out[j*BIN_COUNT + i]);
}
printf("CPU %d GPU %d \n", result[i],gpuresult[i]);
}
else{
printf("CPU %d GPU %d \n", result[i],h_Out[i]);
}
}
/*hipEventElapsedTime(&time_cpu,start_cpu,stop_cpu);
hipEventDestroy(start_cpu);
hipEventDestroy(stop_cpu);
hipEventElapsedTime(&time_gpu,start_gpu,stop_gpu);
hipEventDestroy(start_gpu);
hipEventDestroy(stop_gpu);
*/
int xx=0;
for(int a = 0; a < histogramCount; a++)
{
xx++;
//printf("\n%d %d", h_Timer[a*2+1], h_Timer[a*2]);
}
printf("\n%d\n",xx);
printf("CUDA Event CPU time: %f\n",cutGetTimerValue(time_CPU));
printf("CUDA Event GPU time: %f\n",cutGetTimerValue(time_GPU));
printf("CUDA Event speed up: %f\n",cutGetTimerValue(time_CPU)/cutGetTimerValue(time_GPU));
Cleanup();
}
void Cleanup (void)
{
exit(0);
}
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--atomic") == 0 || strcmp(argv[i], "-atomic") == 0)
{
global_atomic = 1;
i = i + 1;
}
}
}
| 83cdc4dfdb0990db2091c1da3b34736424f8f167.cu | /*
Histogram code with partial reductions and atomicAdd function
*/
#include <stdio.h>
#include <cutil_inline.h>
#define THREADBLOCK_SIZE 32
#define BIN_COUNT 64
uint GlobalSize = 10000;
uint histogramCount = 0;
unsigned int time_GPU = 0;
unsigned int time_CPU = 0;
int global_atomic = 0;
//Device code
int* h_A;
int* d_A;
int* d_PartialHistograms;
int* d_Histogram;
int* h_Out;
int* h_Timer;
int* d_Timer;
void ParseArguments(int, char**);
void Cleanup(void);
__global__ void histogram (int *d_PartialHistograms, int *d_Data, int dataCount, int* timer)
{
//Shared memory
__shared__ int s_Hist[THREADBLOCK_SIZE * BIN_COUNT];
int tid = threadIdx.x + blockIdx.x * blockDim.x;
//int tid = thread
clock_t start_block;
clock_t stop_block;
if(threadIdx.x == 0) start_block = clock();
    for(int i = 0; i < BIN_COUNT; i++)
        s_Hist[threadIdx.x*BIN_COUNT + i] = 0;  // each thread zeroes its own BIN_COUNT-wide slice
int THREAD_N = blockDim.x * gridDim.x;
for (int pos=tid; pos < dataCount; pos = pos + THREAD_N)
{
//int data = d_Data[pos];
++s_Hist[d_Data[pos]+threadIdx.x*BIN_COUNT] ;
}
__syncthreads();
for(int i = 0; i < BIN_COUNT; i++)
{
        d_PartialHistograms[tid*BIN_COUNT + i] = s_Hist[threadIdx.x*BIN_COUNT + i];
// atomicAdd(&d_PartialHistograms[blockIdx.x*BIN_COUNT + i], s_Hist[threadIdx.x*32 +i]);
}
if(threadIdx.x==0)
{
stop_block = clock();
timer[2*blockIdx.x] = start_block;
timer[2*blockIdx.x + 1]=stop_block;
}
}
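// Layout of d_PartialHistograms: one private BIN_COUNT-wide histogram per thread, stored
// row-major, so the count for bin b accumulated by global thread t lives at t*BIN_COUNT + b
// (e.g. with BIN_COUNT = 64, thread 5's count for bin 10 is d_PartialHistograms[5*64 + 10]).
// The host-side check in main() sums these per-thread histograms bin by bin.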
__global__ void mergeHistogram (int *d_Histogram, int *d_PartialHistograms, int histogramCount) {
__shared__ int data[THREADBLOCK_SIZE];
int sum = 0;
for(int i=threadIdx.x; i<histogramCount; i += THREADBLOCK_SIZE)
sum += d_PartialHistograms[blockIdx.x + i*BIN_COUNT];
data[threadIdx.x] = sum;
for(int stride = THREADBLOCK_SIZE/2;stride>0; stride >>= 1){
__syncthreads();
if(threadIdx.x < stride)
data[threadIdx.x] += data[threadIdx.x + stride];
}
if(threadIdx.x == 0)
d_Histogram[blockIdx.x] = data[0];
}
__global__ void histogram_atomic_kernel(int* d_PartialHistograms, int* d_Data, int dataCount, int* timer)
{
unsigned int tid = threadIdx.x;
unsigned int gid = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int stride = blockDim.x * gridDim.x;
clock_t start_atomic;
clock_t stop_atomic;
// Shared memory size declared at kernel launch
extern __shared__ int s_Hist[];
if(tid == 0)
{
start_atomic = clock();
}
// Initializing histogram
for(int i = 0; i< BIN_COUNT; i++)
{
s_Hist[tid*BIN_COUNT+i]=0;
}
// Filling the histogram array
for(int pos=gid; pos < dataCount; pos += stride)
{
s_Hist[tid*BIN_COUNT+d_Data[pos]]++;
}
__syncthreads();
for(int thread_hist = 0; thread_hist < blockDim.x; thread_hist++)
{
atomicAdd(&d_PartialHistograms[tid],s_Hist[thread_hist*BIN_COUNT+tid]);
atomicAdd(&d_PartialHistograms[tid+blockDim.x],s_Hist[thread_hist*BIN_COUNT+tid+blockDim.x]);
}
if(tid == 0)
{
stop_atomic = clock();
timer[blockIdx.x] = stop_atomic - start_atomic;
}
}
int main (int argc, char** argv)
{
ParseArguments(argc, argv);
int N = GlobalSize;
int AtomicCheck = global_atomic;
printf("Histogram Size %d\n", N);
if(AtomicCheck)
printf("Using Atomic add\n");
size_t size = N*sizeof(int);
int sharedMemSize = THREADBLOCK_SIZE*BIN_COUNT*sizeof(int);
size_t atomic_hist_size = sizeof(int)*BIN_COUNT;
histogramCount = (GlobalSize+31)/THREADBLOCK_SIZE;
int result[BIN_COUNT];
int timer_size = 2*histogramCount*sizeof(int);
h_A = (int*)(malloc(size));
h_Timer = (int *)malloc(timer_size);
//histogramCount = (GlobalSize+31)/THREADBLOCK_SIZE;
//cudaEvent_t start_cpu, stop_cpu, start_gpu, stop_gpu;
// float time_cpu = 0.0, time_gpu = 0.0;
//Create timers
cutilCheckError(cutCreateTimer(&time_GPU));
cutilCheckError(cutCreateTimer(&time_CPU));
/*cudaEventCreate(&start_gpu);
cudaEventCreate(&stop_gpu);
cudaEventCreate(&start_cpu);
cudaEventCreate(&stop_cpu);
*/
srand(1);
for (int i = 0; i < GlobalSize; ++i)
h_A[i] = rand()%BIN_COUNT;
//cudaEventRecord(start_cpu,0);
cutilCheckError(cutStartTimer(time_CPU));
for (int i = 0; i < BIN_COUNT; i++)
result[i] = 0;
for (int i = 0; i < N; i++)
result[h_A[i]]++;
//cudaEventRecord(stop_cpu,0);
//cudaEventSynchronize(stop_cpu);
cutilCheckError(cutStopTimer(time_CPU));
size_t partialsize = (histogramCount*THREADBLOCK_SIZE*BIN_COUNT)*sizeof(int);
size_t newsize = (histogramCount * BIN_COUNT)*sizeof(int);
if(AtomicCheck){
h_Out = (int*)(malloc(atomic_hist_size));
}
else{
h_Out = (int*)(malloc(partialsize));
}
printf("Allocate h_Out");
cutilSafeCall(cudaMalloc((void**)&d_A, size));
if(AtomicCheck){
cutilSafeCall(cudaMalloc((void**)&d_PartialHistograms, atomic_hist_size));
cutilSafeCall( cudaMemset(d_PartialHistograms, 0, atomic_hist_size));
}
else{
cutilSafeCall(cudaMalloc((void**)&d_PartialHistograms, partialsize));
cutilSafeCall( cudaMemset(d_PartialHistograms, 0, partialsize));
}
cutilSafeCall(cudaMalloc((void**)&d_Histogram, newsize));
cutilSafeCall( cudaMalloc((void**)&d_Timer, timer_size) );
printf("\nDevice allocated\n");
cutilSafeCall(cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice));
printf("Memcpy done\n");
printf("HC %d ThreadSize %d \n", histogramCount, THREADBLOCK_SIZE);
//cudaEventRecord(start_gpu,0);
if(!AtomicCheck){
cutilCheckError(cutStartTimer(time_GPU));
histogram<<<histogramCount, THREADBLOCK_SIZE>>>(d_PartialHistograms, d_A, GlobalSize, d_Timer);
//cudaEventRecord(stop_gpu,0);
//cudaEventSynchronize(stop_gpu);
cutilCheckMsg("kernel launch failure");
cutilSafeCall( cudaThreadSynchronize() ); // Have host wait for kernel
cutilCheckError(cutStopTimer(time_GPU));
//cutilSafeCall(cudaMemcpy(h_Out, d_PartialHistograms, partialsize, cudaMemcpyDeviceToHost));
//cutilSafeCall(cudaMemcpy(h_Timer, d_Timer, timer_size, cudaMemcpyDeviceToHost));
mergeHistogram<<<BIN_COUNT, THREADBLOCK_SIZE>>>(d_Histogram,d_PartialHistograms,histogramCount);
cutilSafeCall( cudaThreadSynchronize() ); // Have host wait for kernel
}
else{
cutilCheckError(cutStartTimer(time_GPU));
histogram_atomic_kernel<<<histogramCount, THREADBLOCK_SIZE, sharedMemSize>>>(d_PartialHistograms, d_A, GlobalSize, d_Timer);
cutilSafeCall( cudaThreadSynchronize() ); // Have host wait for kernel
cutilCheckError(cutStopTimer(time_GPU));
}
if(!AtomicCheck){
cutilSafeCall(cudaMemcpy(h_Out, d_PartialHistograms, partialsize, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(h_Timer, d_Timer, timer_size, cudaMemcpyDeviceToHost));
}
else{
cutilSafeCall(cudaMemcpy(h_Out, d_PartialHistograms, atomic_hist_size, cudaMemcpyDeviceToHost));
cutilSafeCall(cudaMemcpy(h_Timer, d_Timer, timer_size, cudaMemcpyDeviceToHost));
}
int gpuresult[BIN_COUNT]={0};
for(int i=0; i<BIN_COUNT; i++)
{
if(!AtomicCheck){
for(int j=0;j<histogramCount*THREADBLOCK_SIZE;j++)
{
gpuresult[i] = gpuresult[i] + h_Out[j*BIN_COUNT + i];
// gpuresult[i] = h_Out[i];
// printf(" %d ", h_Out[j*BIN_COUNT + i]);
}
printf("CPU %d GPU %d \n", result[i],gpuresult[i]);
}
else{
printf("CPU %d GPU %d \n", result[i],h_Out[i]);
}
}
/*cudaEventElapsedTime(&time_cpu,start_cpu,stop_cpu);
cudaEventDestroy(start_cpu);
cudaEventDestroy(stop_cpu);
cudaEventElapsedTime(&time_gpu,start_gpu,stop_gpu);
cudaEventDestroy(start_gpu);
cudaEventDestroy(stop_gpu);
*/
int xx=0;
for(int a = 0; a < histogramCount; a++)
{
xx++;
//printf("\n%d %d", h_Timer[a*2+1], h_Timer[a*2]);
}
printf("\n%d\n",xx);
printf("CUDA Event CPU time: %f\n",cutGetTimerValue(time_CPU));
printf("CUDA Event GPU time: %f\n",cutGetTimerValue(time_GPU));
printf("CUDA Event speed up: %f\n",cutGetTimerValue(time_CPU)/cutGetTimerValue(time_GPU));
Cleanup();
}
void Cleanup (void)
{
exit(0);
}
void ParseArguments(int argc, char** argv)
{
for (int i = 0; i < argc; ++i)
{
if (strcmp(argv[i], "--size") == 0 || strcmp(argv[i], "-size") == 0)
{
GlobalSize = atoi(argv[i+1]);
i = i + 1;
}
if (strcmp(argv[i], "--atomic") == 0 || strcmp(argv[i], "-atomic") == 0)
{
global_atomic = 1;
i = i + 1;
}
}
}
|
053db89fcf6a8d6fd9506f4d36692239d8e10de6.hip | // !!! This is a file automatically generated by hipify!!!
#include "utils.h"
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Copyright © 2019 Naga V Gudapati. All rights reserved.
//#code taken from fluent cpp which splits a string into a vector using delimiters
void split(std::vector<std::string>& tokens, const std::string& s, char delimiter)
{
tokens.clear();
std::string token;
std::istringstream tokenStream(s);
while (std::getline(tokenStream, token, delimiter))
{
tokens.push_back(token);
}
}
void get_CRSM_from_svm(Classification_Data_CRS &M, const std::string &file_path){
std::string path = file_path;
std::vector<std::string> tokens(2);
std::ifstream libsvm_file(path);
if (libsvm_file.is_open()) {
std::cout << "Processing the SVM file" << std::endl;
std::string observation;
M.row_ptr.push_back(0); //First is always 0 ???
M.n = 0; //This will be used to store the number of columns
while (getline(libsvm_file, observation)) {
//Splitting on whitespace as some SVMS have more than one space character or a tab character
std::istringstream iss_obs(observation);
std::vector<std::string> splitString(std::istream_iterator<std::string>{iss_obs}, std::istream_iterator<std::string>());
//I am pushing back the label to the y_label vector
M.y_label.push_back(std::stoi(splitString[0]));
// This will iterate from the second element onwards, then split at : and push the first
// value into col_index and second values into the values vectors.
for (auto iter = std::next(splitString.begin()); iter != splitString.end(); ++iter) {
split(tokens, *iter, ':');
auto& col_value = tokens;
M.col_index.push_back(std::stoi(col_value[0])-1);
M.values.push_back(std::stof(col_value[1]));
if (M.n < std::stoi(col_value[0])) { //We keep track of the largest n which will give us the value of largest feature number
M.n = std::stoi(col_value[0]);
}
}
M.row_ptr.push_back(static_cast<int>(M.col_index.size()));
}
libsvm_file.close();
}
else {
std::cout << "Could not find the SMV file, check again!" << std::endl;
}
//numRows will be given by the rowpointer size -1
M.m = static_cast<int>(M.row_ptr.size())-1;
M.nzmax = static_cast<long long>(M.values.size());
    //Normalization of the problem data. This is just normalizing each observation.
for (int i = 0; i < M.m; i++) {
//Let us normalize the feature values of each observation
// Step 1) calculate the norm of all the features belonging to a single observation
// Step 2) divide each feature value of every observation using the respective observation's norm
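        // In other words, for observation i: values[j] <- values[j] / sqrt(sum over row i of values[j]^2),
        // so every observation ends up with unit L2 norm.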
//Step 1):
auto norm_sqrd = 0.0;
for(auto j = M.row_ptr[i]; j < M.row_ptr[i+1]; j++){
assert(j < M.values.size());
norm_sqrd += ::pow(M.values[j], 2);
}
auto norm = std::sqrt(norm_sqrd);
//Step 2):
for(auto j = M.row_ptr[i]; j < M.row_ptr[i+1]; j++){
M.values[j] = M.values[j]/norm;
}
}
std::cout << "Finished processing the LIBSVM file. " << M.m << " observations and " << M.n
<< " features were read. The total number of non-zero elements are: " << M.nzmax << std::endl;
}
| 053db89fcf6a8d6fd9506f4d36692239d8e10de6.cu | #include "utils.h"
long long get_time() {
struct timeval tv;
gettimeofday(&tv, NULL);
return (tv.tv_sec * 1000000) +tv.tv_usec;
}
// Copyright © 2019 Naga V Gudapati. All rights reserved.
//#code taken from fluent cpp which splits a string into a vector using delimiters
void split(std::vector<std::string>& tokens, const std::string& s, char delimiter)
{
tokens.clear();
std::string token;
std::istringstream tokenStream(s);
while (std::getline(tokenStream, token, delimiter))
{
tokens.push_back(token);
}
}
void get_CRSM_from_svm(Classification_Data_CRS &M, const std::string &file_path){
std::string path = file_path;
std::vector<std::string> tokens(2);
std::ifstream libsvm_file(path);
if (libsvm_file.is_open()) {
std::cout << "Processing the SVM file" << std::endl;
std::string observation;
M.row_ptr.push_back(0); //First is always 0 ???
M.n = 0; //This will be used to store the number of columns
while (getline(libsvm_file, observation)) {
//Splitting on whitespace as some SVMS have more than one space character or a tab character
std::istringstream iss_obs(observation);
std::vector<std::string> splitString(std::istream_iterator<std::string>{iss_obs}, std::istream_iterator<std::string>());
//I am pushing back the label to the y_label vector
M.y_label.push_back(std::stoi(splitString[0]));
// This will iterate from the second element onwards, then split at : and push the first
// value into col_index and second values into the values vectors.
for (auto iter = std::next(splitString.begin()); iter != splitString.end(); ++iter) {
split(tokens, *iter, ':');
auto& col_value = tokens;
M.col_index.push_back(std::stoi(col_value[0])-1);
M.values.push_back(std::stof(col_value[1]));
if (M.n < std::stoi(col_value[0])) { //We keep track of the largest n which will give us the value of largest feature number
M.n = std::stoi(col_value[0]);
}
}
M.row_ptr.push_back(static_cast<int>(M.col_index.size()));
}
libsvm_file.close();
}
else {
std::cout << "Could not find the SMV file, check again!" << std::endl;
}
//numRows will be given by the rowpointer size -1
M.m = static_cast<int>(M.row_ptr.size())-1;
M.nzmax = static_cast<long long>(M.values.size());
    //Normalization of the problem data. This is just normalizing each observation.
for (int i = 0; i < M.m; i++) {
//Let us normalize the feature values of each observation
// Step 1) calculate the norm of all the features belonging to a single observation
// Step 2) divide each feature value of every observation using the respective observation's norm
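        // In other words, for observation i: values[j] <- values[j] / sqrt(sum over row i of values[j]^2),
        // so every observation ends up with unit L2 norm.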
//Step 1):
auto norm_sqrd = 0.0;
for(auto j = M.row_ptr[i]; j < M.row_ptr[i+1]; j++){
assert(j < M.values.size());
norm_sqrd += std::pow(M.values[j], 2);
}
auto norm = std::sqrt(norm_sqrd);
//Step 2):
for(auto j = M.row_ptr[i]; j < M.row_ptr[i+1]; j++){
M.values[j] = M.values[j]/norm;
}
}
std::cout << "Finished processing the LIBSVM file. " << M.m << " observations and " << M.n
<< " features were read. The total number of non-zero elements are: " << M.nzmax << std::endl;
}
|
fa872ec3ef60387ded2e44053fc85181cc1fe415.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMath.hip"
#else
#include "ATen/hip/HIPContext.h"
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(hipGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(hipMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(hipGetLastError());
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
// previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
// to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
// to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific
// size (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
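  // For example, an empty tensor of shape [0] passed here is simply ignored, while an empty
  // tensor of shape [0, 3] is not skipped and still has to go through the dimension checks below.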
int i, j, cohortMax;
int64_t offset;
bool hasSkippedInput = false;
THCTensor *notSkippedTensor = NULL; // non-owning reference
auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; };
int nDims = 0;
for (i = 0; i < numInputs; i++)
{
if (should_skip(inputs[i])) {
hasSkippedInput = true;
continue;
}
nDims = inputs[i]->dim();
notSkippedTensor = inputs[i];
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension);
std::vector<int64_t> size(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension);
cat_dim_size += THCTensor_(size)(state, tensor, dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim);
if (dim == dimension) {
result_dim_size = cat_dim_size;
}
size[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, {});
  // We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
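  // If any of these conditions fails, the else branch at the bottom of this function falls back
  // to a sequential narrow() + copy() into the result, one input tensor at a time.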
if (numInputs > 1 &&
!hasSkippedInput &&
result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, inputs, numInputs) &&
THCTensor_all32BitIndexable(state, inputs, numInputs) &&
THCTensor_allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
scalar_t *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
at::hip::HIPStreamMasqueradingAsCUDA stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
hipLaunchKernelGGL(( CatArrayBatchedCopy<scalar_t, unsigned int, DIMS>), dim3(catGrid), dim3(applyBlock), 0, stream.stream(), data, d_inputs, param, dimension, param.outputStride[dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize);
CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get());
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension);
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(hipMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<scalar_t, unsigned int>),
hipMemcpyHostToDevice,
stream.stream()));
THCudaHostRecord(state, stackInputs);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where x dim fills half gpu and y dim is number of tensors.
      //This will have cat'ing two tensors fill the entire grid, but prevents
      //many threads from needlessly loading metadata if their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(hipGetLastError());
}
THCudaFree(state, d_inputs);
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension);
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
  // this is a little awkward for scalars because we run thrust to count the number of non-zeros
  // (which is necessary to get the correct size), but thrust just has an array API, so
  // we need to basically treat the scalar as a 1-dimensional tensor (array) for
  // the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
hipStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if TORCH_HIP_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::hip::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(hipGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyFromDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
hipLaunchKernelGGL(( THCTensor_copyToDiagonal<scalar_t>), dim3(grid), dim3(threads), 0, THCState_getCurrentStream(state),
THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(hipGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
| fa872ec3ef60387ded2e44053fc85181cc1fe415.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMath.cu"
#else
#include "ATen/cuda/CUDAContext.h"
void THCTensor_(fill)(THCState* state, THCTensor *self_, scalar_t value)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (!THC_pointwiseApply1<scalar_t>(
state, self_, TensorFillOp<scalar_t>(value))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
THCudaCheck(cudaGetLastError());
}
void THCTensor_(zero)(THCState *state, THCTensor *self_)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_));
if (THCTensor_(isContiguous)(state, self_)) {
THCudaCheck(cudaMemsetAsync(THCTensor_(data)(state, self_),
0,
sizeof(scalar_t) * THCTensor_(nElement)(state, self_),
THCState_getCurrentStream(state)));
} else {
if (!THC_pointwiseApply1<scalar_t>(
state, self_,
TensorFillOp<scalar_t>(ScalarConvert<int, scalar_t>::to(0)))) {
THArgCheck(false, 1, CUTORCH_DIM_WARNING);
}
}
THCudaCheck(cudaGetLastError());
}
ptrdiff_t
THCTensor_(numel)(THCState *state, THCTensor *t)
{
return THCTensor_(nElement)(state, t);
}
void THCTensor_(cat)(THCState *state, THCTensor *result,
THCTensor *ta, THCTensor *tb, int dimension)
{
THCTensor* inputs[2];
inputs[0] = ta;
inputs[1] = tb;
THCTensor_(catArray)(state, result, inputs, 2, dimension);
}
void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension);
inline void THCTensor_(check_shape_except_dim)(THCState *state,
THCTensor *first, THCTensor *second, int dimension)
{
int first_dims = first->dim();
int second_dims = second->dim();
THArgCheck(first_dims == second_dims, 0,
"Tensors must have same number of dimensions: got %d and %d",
first_dims, second_dims);
for (int dim = 0; dim < first_dims; dim++) {
if (dim == dimension) {
continue;
}
int64_t first_dim_size = THCTensor_(size)(state, first, dim);
int64_t second_dim_size = THCTensor_(size)(state, second, dim);
THArgCheck(first_dim_size == second_dim_size, 0,
"Sizes of tensors must match except in dimension %d. Got %lld and %lld in dimension %d",
dimension, (long long)first_dim_size, (long long)second_dim_size, dim);
}
}
void THCTensor_(catArray)(THCState *state, THCTensor *result,
THCTensor **inputs, int numInputs, int dimension)
{
// previously, size [0] tensors were the only possible empty tensors; thus, it wasn't possible
// to cat empty tensors unless all the other tensors were 1-dimensional, so we allowed these tensors
// to be "skipped". We maintain this behavior for backwards compatibility, but only for this specific
// size (i.e. other empty sizes are not skipped).
// FIXME: warn if this is the case
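  // For example, an empty tensor of shape [0] passed here is simply ignored, while an empty
  // tensor of shape [0, 3] is not skipped and still has to go through the dimension checks below.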
int i, j, cohortMax;
int64_t offset;
bool hasSkippedInput = false;
THCTensor *notSkippedTensor = NULL; // non-owning reference
auto should_skip = [](THCTensor *t) { return t->is_empty() && t->dim() == 1; };
int nDims = 0;
for (i = 0; i < numInputs; i++)
{
if (should_skip(inputs[i])) {
hasSkippedInput = true;
continue;
}
nDims = inputs[i]->dim();
notSkippedTensor = inputs[i];
}
// If all inputs are empty tensors, return an empty tensor
if (notSkippedTensor == NULL) {
return;
}
THArgCheck(numInputs > 0, 3, "invalid number of inputs %d", numInputs);
THArgCheck(dimension >= 0, 4, "invalid dimension %d", dimension);
std::vector<int64_t> size(nDims);
// Compute size of the result in the cat dimension
int64_t cat_dim_size = 0;
for (int i = 0; i < numInputs; i++) {
THCTensor *tensor = inputs[i];
if (should_skip(tensor)) {
continue;
}
THCTensor_(check_shape_except_dim)(state, notSkippedTensor, tensor, dimension);
cat_dim_size += THCTensor_(size)(state, tensor, dimension);
}
// Compute the size of the result
for (int dim = 0; dim < nDims; dim++) {
int64_t result_dim_size = THCTensor_(size)(state, notSkippedTensor, dim);
if (dim == dimension) {
result_dim_size = cat_dim_size;
}
size[dim] = result_dim_size;
}
THCTensor_(resize)(state, result, size, {});
  // We parallelize the copy if all 7 conditions pass:
//
// 1. There is more than one input tensor
// 2. No empty inputs
// 3. The result tensor is 32-bit indexable
// 4. The number of dimensions is <= 4
// 5. All input tensors are contiguous (output tensor may be non-contig)
// 6. All input tensors can use 32-bit indexing
// 7. All input tensors are on the same device
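  // If any of these conditions fails, the else branch at the bottom of this function falls back
  // to a sequential narrow() + copy() into the result, one input tensor at a time.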
if (numInputs > 1 &&
!hasSkippedInput &&
result->dim() <= CAT_ARRAY_MAX_INPUT_DIMS &&
THCTensor_canUse32BitIndexMath(state, result) &&
THCTensor_allContiguous(state, inputs, numInputs) &&
THCTensor_all32BitIndexable(state, inputs, numInputs) &&
THCTensor_allSameDevice(state, inputs, numInputs)) {
// First, let's set up our kernel parameters. We start with a raw pointer to the storage
// for the output Tensor.
scalar_t *data = THCTensor_(data)(state, result);
// Kernel Parameter
size_t tensorMetadataSize = sizeof(CatArrInputTensor<scalar_t, unsigned int>) * CAT_ARRAY_BATCH_SIZE;
auto d_inputs = static_cast<CatArrInputTensor<scalar_t, unsigned int> *>(THCudaMalloc(state, tensorMetadataSize));
OutputTensorSizeStride<unsigned int, CAT_ARRAY_MAX_INPUT_DIMS> param;
// Next, let's initialize the size, stride arrays for the output Tensor.
for (i = 0; i < nDims; ++i) {
param.outputSize[i] = THCTensor_(size)(state, result, i);
param.outputStride[i] = THCTensor_(stride)(state, result, i);
}
at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream();
// Template Declarations for dim = 1, 2, 3, 4
#define HANDLE_CASE(DIMS) \
CatArrayBatchedCopy<scalar_t, unsigned int, DIMS><<<catGrid, applyBlock, 0, stream.stream()>>>(data, d_inputs, param, dimension, param.outputStride[dimension]);
// Now we loop
offset = 0;
for (i = 0; i < numInputs; i += CAT_ARRAY_BATCH_SIZE) {
// Re-allocate stackInputs every iteration to avoid read-after-write hazard
{
auto stackInputs_owner = THCudaHostAlloc(state, tensorMetadataSize);
CatArrInputTensor<scalar_t, unsigned int>* stackInputs = static_cast<CatArrInputTensor<scalar_t, unsigned int>*>(stackInputs_owner.get());
cohortMax = 0;
for (j = 0; j < CAT_ARRAY_BATCH_SIZE && (i+j) < numInputs; ++j) {
int64_t dimSize = THCTensor_(size)(state, inputs[i+j], dimension);
stackInputs[j].input = THCTensor_(data)(state, inputs[i+j]);
stackInputs[j].offset = offset;
stackInputs[j].dimSize = dimSize;
stackInputs[j].nElements = THCTensor_(nElement)(state, inputs[i+j]);
cohortMax = cohortMax > (int) stackInputs[j].nElements ? cohortMax : (int) stackInputs[j].nElements;
// update offset
offset += dimSize;
}
THCudaCheck(cudaMemcpyAsync(
d_inputs,
stackInputs,
j * sizeof(CatArrInputTensor<scalar_t, unsigned int>),
cudaMemcpyHostToDevice,
stream.stream()));
THCudaHostRecord(state, stackInputs);
}
// Next, let's consider how we set our kernel launch parameters.
// We borrow from THCApply, which the kernel's internal indexing
// is based on.
dim3 applyBlock = getApplyBlock();
//Get grid where x dim fills half gpu and y dim is number of tensors.
      //This will have cat'ing two tensors fill the entire grid, but prevents
      //many threads from needlessly loading metadata if their sizes are small.
dim3 catGrid;
getCatGrid(state, j, catGrid);
switch (nDims) {
case 1:
HANDLE_CASE(1);
break;
case 2:
HANDLE_CASE(2);
break;
case 3:
HANDLE_CASE(3);
break;
case 4:
HANDLE_CASE(4);
break;
}
THCudaCheck(cudaGetLastError());
}
THCudaFree(state, d_inputs);
#undef HANDLE_CASE
} else {
offset = 0;
for (j = 0; j < numInputs; j++)
{
if (should_skip(inputs[j])) continue;
int64_t dimSize = THCTensor_(size)(state, inputs[j], dimension);
THCTensor *nt = THCTensor_(newWithTensor)(state, result);
THCTensor_(narrow)(state, nt, NULL, dimension, offset, dimSize);
THCTensor_(copy)(state, nt, inputs[j]);
THCTensor_(free)(state, nt);
offset += dimSize;
}
}
}
void THCTensor_(nonzero)(THCState* state, THCudaLongTensor *tensor,
THCTensor *self)
{
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self ));
THCAssertSameGPU(THCudaLongTensor_checkGPU(state, 1, tensor));
using namespace thrust::placeholders;
THCThrustAllocator thrustAlloc(state);
self = THCTensor_(newContiguous)(state, self);
thrust::device_ptr<scalar_t> self_data(THCTensor_(data)(state, self));
int num_dim = THCTensor_(nDimension)(state, self);
int num_dim_noscalars = std::max<int>(1, num_dim);
int64_t N = THCTensor_(nElement)(state, self);
  // this is a little awkward for scalars because we run thrust to count the number of non-zeros
  // (which is necessary to get the correct size), but thrust just has an array API, so
  // we need to basically treat the scalar as a 1-dimensional tensor (array) for
  // the counting part.
THCudaLongTensor_resize2d(state, tensor, N, num_dim_noscalars);
tensor = THCudaLongTensor_newContiguous(state, tensor);
thrust::device_ptr<int64_t> tensor_data(THCudaLongTensor_data(state, tensor));
thrust::counting_iterator<int64_t> idxfirst(0);
thrust::counting_iterator<int64_t> idxlast = idxfirst + N;
typedef thrust::device_ptr<int64_t> Iter;
strided_range<Iter> strided_tensor(tensor_data,
tensor_data+N*num_dim_noscalars, num_dim_noscalars);
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
cudaStream_t stream = THCState_getCurrentStream(state);
#endif
strided_range<Iter>::iterator dend = thrust::copy_if(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
idxfirst,
idxlast,
self_data,
strided_tensor.begin(),
NonZeroOp<scalar_t>()
);
int64_t num_nonzeros = thrust::distance(strided_tensor.begin(), dend);
if (num_nonzeros > 0 && num_dim > 0) {
int64_t div = 1;
for (int dim = num_dim-1; dim >= 0; dim--) {
strided_range<Iter> stride_dim(tensor_data+dim,
tensor_data+N*num_dim, num_dim);
thrust::transform(
#if CUDA_VERSION >= 7000 || defined __HIP_PLATFORM_HCC__
thrust::cuda::par(thrustAlloc).on(stream),
#endif
strided_tensor.begin(),
strided_tensor.end(),
stride_dim.begin(),
idx_functor(div, THTensor_(size)(self, dim))
);
div *= THTensor_(size)(self, dim);
}
}
THCudaLongTensor_resize2d(state, tensor, num_nonzeros, num_dim);
THCTensor_(free)(state, self);
THCudaLongTensor_free(state, tensor);
THCudaCheck(cudaGetLastError());
}
#if !defined(THC_REAL_IS_BOOL) /* non bool only part */
void THCTensor_(diag)(THCState *state, THCTensor *self_, THCTensor *src_, int64_t k){
THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, src_));
int nDimension = THCTensor_(nDimension)(state, src_);
THArgCheck((nDimension == 2) || (nDimension == 1), 1, "expected a matrix or a vector");
if (nDimension == 2) {
int64_t stride0 = THCTensor_(stride)(state, src_, 0);
int64_t stride1 = THCTensor_(stride)(state, src_, 1);
int64_t size0 = THCTensor_(size)(state, src_, 0);
int64_t size1 = THCTensor_(size)(state, src_, 1);
int64_t size = (k > 0) ? min((int64_t)size0, (int64_t)size1 - k) : min((int64_t)size0 + k, (int64_t)size1);
THCTensor_(resize1d)(state, self_, size);
if (size > 0) {
int64_t strideSelf = THCTensor_(stride)(state, self_, 0);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (int64_t)threads.x)));
int64_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyFromDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, size, stride0 + stride1, strideSelf);
}
} else {
ptrdiff_t totalElements = THCTensor_(nElement)(state, src_);
ptrdiff_t size = (k > 0) ? totalElements + k : totalElements - k;
int64_t strideSrc = THTensor_(stride)(src_, 0);
THCTensor_(resize2d)(state, self_, size, size);
THCTensor_(zero)(state, self_);
if (size > 0) {
int64_t stride0 = THCTensor_(stride)(state, self_, 0);
int64_t stride1 = THCTensor_(stride)(state, self_, 1);
const dim3 threads(min((int64_t)at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, (int64_t)size));
dim3 grid(min((int64_t)1024, (int64_t)THCCeilDiv(size, (ptrdiff_t)threads.x)));
ptrdiff_t start = (k >= 0 ? k * stride1 : -k * stride0);
THCTensor_copyToDiagonal<scalar_t><<<grid, threads, 0, THCState_getCurrentStream(state)>>>
(THCTensor_(data)(state, self_), THCTensor_(data)(state, src_), start, totalElements, stride0 + stride1, strideSrc);
}
}
THCudaCheck(cudaGetLastError());
}
accreal THCTensor_(trace)(THCState *state, THCTensor *src_) {
THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, src_));
THArgCheck((THTensor_nDimensionLegacyAll(src_) == 2), 1, "expected a matrix");
THCTensor *diag = THCTensor_(new)(state);
THCTensor_(diag)(state, diag, src_, 0);
accreal trace = THCTensor_(sumall)(state, diag);
THCTensor_(free)(state, diag);
return trace;
}
#endif
#endif
|
ed0062cbc1157d5d7910a4fe216ce866c54a2787.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for (int i = 0; i < BLOCK_SIZE; i++) {
for (int j = 0; j < BLOCK_SIZE; j++) {
sum += in[start + i * width + j] * mul[j];
}
}
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / sum;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / sum;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / sum;
else
out[tx * width + ty] = 0.0f;
}
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
hipHostMalloc((void**)&hA_in, SIZE * sizeof(float));
hipHostMalloc((void**)&hA_out, SIZE * sizeof(float));
hipHostMalloc((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
hipMalloc((void **)&dA_in, SIZE * sizeof(float));
hipMalloc((void **)&dA_out, SIZE * sizeof(float));
hipMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
hipMemcpy(dA_in, hA_in, SIZE * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), hipMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
hipDeviceSynchronize();
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
norm << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
hipMemcpy(hA_out, dA_out, SIZE * sizeof(float), hipMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
| ed0062cbc1157d5d7910a4fe216ce866c54a2787.cu | #include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cmath>
#include "device_launch_parameters.h"
// #include<sys/time.h>
#define BLOCK_SIZE 16
#define GRID_SIZE 128
#define SIZE BLOCK_SIZE*BLOCK_SIZE*GRID_SIZE*GRID_SIZE
void checkresult(float *ref, float *in, float *out, float *mul, int width) {
for (int i = 0; i < GRID_SIZE; i++) {
for (int j = 0; j < GRID_SIZE; j++) {
float sum = 0.0f;
int start = j * BLOCK_SIZE * width + i * BLOCK_SIZE;
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
sum += in[start + ii * width + jj] * mul[jj];
}
}
for (int ii = 0; ii < BLOCK_SIZE; ii++) {
for (int jj = 0; jj < BLOCK_SIZE; jj++) {
if (jj % 2 == 0 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 2.0 * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 0)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else if (jj % 2 == 1 && ii % 2 == 1)
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = (-1.0) * in[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] / sum;
else
ref[(j * BLOCK_SIZE + jj) * width + i * BLOCK_SIZE + ii] = 0.0f;
}
}
}
}
for (int i = 0; i < SIZE; i++) {
if (abs(ref[i] - out[i]) > 1.e-6) {
printf("Diff %f\n", abs(ref[i] - out[i]));
printf("results checking failed at %d ref %f out %f\n", i, ref[i], out[i]);
return;
}
}
printf("results checking passed!\n");
}
__global__ void norm(float *in, float *out, float *mul, int width) {
int tx = blockIdx.x * blockDim.x + threadIdx.x;
int ty = blockIdx.y * blockDim.y + threadIdx.y;
if (tx >= width || ty >= SIZE / width) return;
int start = blockIdx.x * blockDim.x * width + blockIdx.y * blockDim.y;
float sum = 0.0f;
for (int i = 0; i < BLOCK_SIZE; i++) {
for (int j = 0; j < BLOCK_SIZE; j++) {
sum += in[start + i * width + j] * mul[j];
}
}
if (tx % 2 == 0 && ty % 2 == 0)
out[tx * width + ty] = 2.0 * in[tx * width + ty] / sum;
else if (tx % 2 == 1 && ty % 2 == 0)
out[tx * width + ty] = in[tx * width + ty] / sum;
else if (tx % 2 == 1 && ty % 2 == 1)
out[tx * width + ty] = (-1.0) * in[tx * width + ty] / sum;
else
out[tx * width + ty] = 0.0f;
}
int main() {
//float *hA_in = (float *)malloc(SIZE * sizeof(float));
//float *hA_out = (float *)malloc(SIZE * sizeof(float));
//float *hB_in = (float *)malloc(BLOCK_SIZE * sizeof(float));
float *ref = (float *)malloc(SIZE * sizeof(float));
float *hA_in, *hA_out, *hB_in;
float *dA_in, *dA_out, *dB_in;
cudaMallocHost((void**)&hA_in, SIZE * sizeof(float));
cudaMallocHost((void**)&hA_out, SIZE * sizeof(float));
cudaMallocHost((void**)&hB_in, BLOCK_SIZE * sizeof(float));
srand(2016);
for (int i = 0; i < SIZE; i++) {
hA_in[i] = (float)rand() / (float)RAND_MAX;
}
for (int i = 0; i < BLOCK_SIZE; i++) {
hB_in[i] = (float)rand() / (float)RAND_MAX;
}
cudaMalloc((void **)&dA_in, SIZE * sizeof(float));
cudaMalloc((void **)&dA_out, SIZE * sizeof(float));
cudaMalloc((void **)&dB_in, BLOCK_SIZE * sizeof(float));
cudaMemcpy(dA_in, hA_in, SIZE * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(dB_in, hB_in, BLOCK_SIZE * sizeof(float), cudaMemcpyHostToDevice);
dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
dim3 grid(GRID_SIZE, GRID_SIZE, 1);
cudaDeviceSynchronize();
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
norm << <grid, block >> > (dA_in, dA_out, dB_in, BLOCK_SIZE * GRID_SIZE);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("kernel time %fs\n", milliseconds);
cudaMemcpy(hA_out, dA_out, SIZE * sizeof(float), cudaMemcpyDeviceToHost);
checkresult(ref, hA_in, hA_out, hB_in, BLOCK_SIZE * GRID_SIZE);
}
|
c18b8fecf2b3cb3580c4109822b8ad5d2bb22a7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <time.h>
#include <float.h>
#include <hiprand/hiprand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(hipError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
hipDeviceReset();
exit(99);
}
}
#define RANDVEC3 vec3(hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state),hiprand_uniform(local_rand_state))
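// Rejection sampling: draw candidate points uniformly in the cube [-1,1)^3 and keep the first
// one that falls strictly inside the unit sphere.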
__device__ vec3 random_in_unit_sphere(hiprandState_t *local_rand_state) {
vec3 p;
do {
p = 2.0f*RANDVEC3 - vec3(1, 1, 1);
} while (p.squared_length() >= 1.0f);
return p;
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
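// Since cur_attenuation is halved on every bounce, a ray that survives all 50 iterations
// would contribute at most 2^-50 of the sky color anyway, so returning black at that point
// is a negligible approximation.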
__device__ vec3 color(const ray& r, hitable **world, hiprandState_t *local_rand_state) {
ray cur_ray = r;
float cur_attenuation = 1.0f;
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
cur_attenuation *= 0.5f;
cur_ray = ray(rec.p, target - rec.p);
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void render_init(int max_x, int max_y, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
hiprand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, hiprandState_t *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
hiprandState_t local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + hiprand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + hiprand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0, 0, -1), 0.5);
*(d_list + 1) = new sphere(vec3(0, -100.5, -1), 100);
*d_world = new hitable_list(d_list, 2);
*d_camera = new camera();
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
delete *(d_list);
delete *(d_list + 1);
delete *d_world;
delete *d_camera;
}
int main() {
int nx = 1200;
int ny = 600;
int ns = 100;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(hipMallocManaged((void **)&fb, fb_size));
// allocate random state
hiprandState_t *d_rand_state;
checkCudaErrors(hipMalloc((void **)&d_rand_state, num_pixels * sizeof(hiprandState_t)));
// make our world of hitables & the camera
hitable **d_list;
checkCudaErrors(hipMalloc((void **)&d_list, 2 * sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(hipMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(hipMalloc((void **)&d_camera, sizeof(camera *)));
create_world << <1, 1 >> >(d_list, d_world, d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> >(nx, ny, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(hipDeviceSynchronize());
free_world << <1, 1 >> >(d_list, d_world, d_camera);
checkCudaErrors(hipGetLastError());
checkCudaErrors(hipFree(d_camera));
checkCudaErrors(hipFree(d_world));
checkCudaErrors(hipFree(d_list));
checkCudaErrors(hipFree(d_rand_state));
checkCudaErrors(hipFree(fb));
hipDeviceReset();
} | c18b8fecf2b3cb3580c4109822b8ad5d2bb22a7f.cu | #include <iostream>
#include <time.h>
#include <float.h>
#include <curand_kernel.h>
#include "vec3.h"
#include "ray.h"
#include "sphere.h"
#include "hitable_list.h"
#include "camera.h"
// limited version of checkCudaErrors from helper_cuda.h in CUDA examples
#define checkCudaErrors(val) check_cuda( (val), #val, __FILE__, __LINE__ )
void check_cuda(cudaError_t result, char const *const func, const char *const file, int const line) {
if (result) {
std::cerr << "CUDA error = " << static_cast<unsigned int>(result) << " at " <<
file << ":" << line << " '" << func << "' \n";
// Make sure we call CUDA Device Reset before exiting
cudaDeviceReset();
exit(99);
}
}
#define RANDVEC3 vec3(curand_uniform(local_rand_state),curand_uniform(local_rand_state),curand_uniform(local_rand_state))
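// Rejection sampling: draw candidate points uniformly in the [-1,1]^3 cube until one falls inside the unit sphere.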
__device__ vec3 random_in_unit_sphere(curandState *local_rand_state) {
vec3 p;
do {
p = 2.0f*RANDVEC3 - vec3(1, 1, 1);
} while (p.squared_length() >= 1.0f);
return p;
}
// Matching the C++ code would recurse enough into color() calls that
// it was blowing up the stack, so we have to turn this into a
// limited-depth loop instead. Later code in the book limits to a max
// depth of 50, so we adapt this a few chapters early on the GPU.
__device__ vec3 color(const ray& r, hitable **world, curandState *local_rand_state) {
ray cur_ray = r;
float cur_attenuation = 1.0f;
for (int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
vec3 target = rec.p + rec.normal + random_in_unit_sphere(local_rand_state);
cur_attenuation *= 0.5f;
cur_ray = ray(rec.p, target - rec.p);
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f - t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0, 0.0, 0.0); // exceeded recursion
}
__global__ void render_init(int max_x, int max_y, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
//Each thread gets same seed, a different sequence number, no offset
curand_init(1984, pixel_index, 0, &rand_state[pixel_index]);
}
__global__ void render(vec3 *fb, int max_x, int max_y, int ns, camera **cam, hitable **world, curandState *rand_state) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if ((i >= max_x) || (j >= max_y)) return;
int pixel_index = j*max_x + i;
curandState local_rand_state = rand_state[pixel_index];
vec3 col(0, 0, 0);
for (int s = 0; s < ns; s++) {
float u = float(i + curand_uniform(&local_rand_state)) / float(max_x);
float v = float(j + curand_uniform(&local_rand_state)) / float(max_y);
ray r = (*cam)->get_ray(u, v);
col += color(r, world, &local_rand_state);
}
rand_state[pixel_index] = local_rand_state;
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
fb[pixel_index] = col;
}
__global__ void create_world(hitable **d_list, hitable **d_world, camera **d_camera) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
*(d_list) = new sphere(vec3(0, 0, -1), 0.5);
*(d_list + 1) = new sphere(vec3(0, -100.5, -1), 100);
*d_world = new hitable_list(d_list, 2);
*d_camera = new camera();
}
}
__global__ void free_world(hitable **d_list, hitable **d_world, camera **d_camera) {
delete *(d_list);
delete *(d_list + 1);
delete *d_world;
delete *d_camera;
}
int main() {
int nx = 1200;
int ny = 600;
int ns = 100;
int tx = 8;
int ty = 8;
std::cerr << "Rendering a " << nx << "x" << ny << " image with " << ns << " samples per pixel ";
std::cerr << "in " << tx << "x" << ty << " blocks.\n";
int num_pixels = nx*ny;
size_t fb_size = num_pixels * sizeof(vec3);
// allocate FB
vec3 *fb;
checkCudaErrors(cudaMallocManaged((void **)&fb, fb_size));
// allocate random state
curandState *d_rand_state;
checkCudaErrors(cudaMalloc((void **)&d_rand_state, num_pixels * sizeof(curandState)));
// make our world of hitables & the camera
hitable **d_list;
checkCudaErrors(cudaMalloc((void **)&d_list, 2 * sizeof(hitable *)));
hitable **d_world;
checkCudaErrors(cudaMalloc((void **)&d_world, sizeof(hitable *)));
camera **d_camera;
checkCudaErrors(cudaMalloc((void **)&d_camera, sizeof(camera *)));
create_world << <1, 1 >> >(d_list, d_world, d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
clock_t start, stop;
start = clock();
// Render our buffer
dim3 blocks(nx / tx + 1, ny / ty + 1);
dim3 threads(tx, ty);
render_init << <blocks, threads >> >(nx, ny, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
render << <blocks, threads >> >(fb, nx, ny, ns, d_camera, d_world, d_rand_state);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaDeviceSynchronize());
stop = clock();
double timer_seconds = ((double)(stop - start)) / CLOCKS_PER_SEC;
std::cerr << "took " << timer_seconds << " seconds.\n";
// Output FB as Image
std::cout << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny - 1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(255.99*fb[pixel_index].r());
int ig = int(255.99*fb[pixel_index].g());
int ib = int(255.99*fb[pixel_index].b());
std::cout << ir << " " << ig << " " << ib << "\n";
}
}
// clean up
checkCudaErrors(cudaDeviceSynchronize());
free_world << <1, 1 >> >(d_list, d_world, d_camera);
checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaFree(d_camera));
checkCudaErrors(cudaFree(d_world));
checkCudaErrors(cudaFree(d_list));
checkCudaErrors(cudaFree(d_rand_state));
checkCudaErrors(cudaFree(fb));
cudaDeviceReset();
} |
2b07883dceab55c097b1c0806c6bcbcd3cd60352.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "MyCuda.h"
/**
* Each warp executes in lockstep, so no __syncthreads() is needed inside a warp.
* volatile : marks the variable as "sensitive" - its value in memory may change at
*            any time, so every read must come from memory rather than a cached copy.
* sdata : pointer to the reduction array in shared memory
* tid   : thread index within the block
*/
__device__ void warpReduce(volatile float *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
/**
* Optimization over reduce3: removes the redundant synchronization for the last warp
* (each warp is implicitly synchronized).
* globalInputData  : input data, in global memory
* globalOutputData : output data, in global memory
*/
__global__ void reduce4(float *globalInputData, float *globalOutputData, unsigned int n)
{
__shared__ float sdata[BLOCK_SIZE];
// coordinate indices
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
unsigned int indexWithOffset = index + blockDim.x;
if (index >= n)
{
sdata[tid] = 0;
}
else if (indexWithOffset >= n)
{
sdata[tid] = globalInputData[index];
}
else
{
sdata[tid] = globalInputData[index] + globalInputData[indexWithOffset];
}
__syncthreads();
// reduce each block's data in shared memory
for (unsigned int s = blockDim.x / 2; s>32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32)
{
warpReduce(sdata, tid);
}
// write the block's result from shared memory back to global memory
if (tid == 0)
{
globalOutputData[blockIdx.x] = sdata[0];
}
}
/**
* Measures the runtime of reduce4
* fMatrix_Host : pointer to the host matrix
* iRow : number of rows
* iCol : number of columns
* @return the sum
*/
float RuntimeOfReduce4(float *fMatrix_Host, const int iRow, const int iCol)
{
// check that the matrix dimensions are valid
if (iRow <= 0 || iCol <= 0)
{
std::cout << "The size of the matrix is invalid!" << std::endl;
return 0.0;
}
float *fReuslt = (float*)malloc(sizeof(float));
float *fMatrix_Device; // pointer to device (GPU) memory
int iMatrixSize = iRow * iCol; // number of matrix elements
HANDLE_ERROR(hipMalloc((void**)&fMatrix_Device, iMatrixSize * sizeof(float))); // allocate device memory for the matrix
HANDLE_ERROR(hipMemcpy(fMatrix_Device, fMatrix_Host, iMatrixSize * sizeof(float), hipMemcpyHostToDevice)); // copy the data to the device
// record the start time
hipEvent_t start_GPU, end_GPU;
float elaspsedTime;
hipEventCreate(&start_GPU);
hipEventCreate(&end_GPU);
hipEventRecord(start_GPU, 0);
for (int i = 1, iNum = iMatrixSize; i < iMatrixSize; i = 2 * i * BLOCK_SIZE)
{
int iBlockNum = (iNum + (2 * BLOCK_SIZE) - 1) / (2 * BLOCK_SIZE);
hipLaunchKernelGGL(( reduce4), dim3(iBlockNum), dim3(BLOCK_SIZE), 0, 0, fMatrix_Device, fMatrix_Device, iNum);
iNum = iBlockNum;
}
HANDLE_ERROR(hipMemcpy(fReuslt, fMatrix_Device, sizeof(float), hipMemcpyDeviceToHost)); // copy the result back to the host
// stop timing
hipEventRecord(end_GPU, 0);
hipEventSynchronize(end_GPU);
hipEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
hipEventDestroy(start_GPU);
hipEventDestroy(end_GPU);
std::cout << "Reduce4 runtime: " << elaspsedTime << "ms." << std::endl;
HANDLE_ERROR(hipFree(fMatrix_Device));// free the device memory
return fReuslt[0];
} | 2b07883dceab55c097b1c0806c6bcbcd3cd60352.cu | #include "MyCuda.h"
/**
* Each warp synchronizes automatically, so no __syncthreads() is needed inside a warp.
* volatile : marks the variable as "sensitive" - its value in memory may change at
*            any time, so every read must come from memory rather than a cached copy.
* sdata : pointer to the reduction array in shared memory
* tid   : thread index within the block
*/
__device__ void warpReduce(volatile float *sdata, int tid)
{
sdata[tid] += sdata[tid + 32];
sdata[tid] += sdata[tid + 16];
sdata[tid] += sdata[tid + 8];
sdata[tid] += sdata[tid + 4];
sdata[tid] += sdata[tid + 2];
sdata[tid] += sdata[tid + 1];
}
/**
* Optimization over reduce3: removes the redundant synchronization for the last warp
* (each warp is implicitly synchronized).
* globalInputData  : input data, in global memory
* globalOutputData : output data, in global memory
*/
__global__ void reduce4(float *globalInputData, float *globalOutputData, unsigned int n)
{
__shared__ float sdata[BLOCK_SIZE];
// coordinate indices
unsigned int tid = threadIdx.x;
unsigned int index = blockIdx.x*(blockDim.x * 2) + threadIdx.x;
unsigned int indexWithOffset = index + blockDim.x;
if (index >= n)
{
sdata[tid] = 0;
}
else if (indexWithOffset >= n)
{
sdata[tid] = globalInputData[index];
}
else
{
sdata[tid] = globalInputData[index] + globalInputData[indexWithOffset];
}
__syncthreads();
// reduce each block's data in shared memory
for (unsigned int s = blockDim.x / 2; s>32; s >>= 1)
{
if (tid < s)
{
sdata[tid] += sdata[tid + s];
}
__syncthreads();
}
if (tid < 32)
{
warpReduce(sdata, tid);
}
// write the block's result from shared memory back to global memory
if (tid == 0)
{
globalOutputData[blockIdx.x] = sdata[0];
}
}
/**
* Measures the runtime of reduce4
* fMatrix_Host : pointer to the host matrix
* iRow : number of rows
* iCol : number of columns
* @return the sum
*/
float RuntimeOfReduce4(float *fMatrix_Host, const int iRow, const int iCol)
{
// check that the matrix dimensions are valid
if (iRow <= 0 || iCol <= 0)
{
std::cout << "The size of the matrix is invalid!" << std::endl;
return 0.0;
}
float *fReuslt = (float*)malloc(sizeof(float));
float *fMatrix_Device; // pointer to device (GPU) memory
int iMatrixSize = iRow * iCol; // number of matrix elements
HANDLE_ERROR(cudaMalloc((void**)&fMatrix_Device, iMatrixSize * sizeof(float))); // allocate device memory for the matrix
HANDLE_ERROR(cudaMemcpy(fMatrix_Device, fMatrix_Host, iMatrixSize * sizeof(float), cudaMemcpyHostToDevice)); // copy the data to the device
// record the start time
cudaEvent_t start_GPU, end_GPU;
float elaspsedTime;
cudaEventCreate(&start_GPU);
cudaEventCreate(&end_GPU);
cudaEventRecord(start_GPU, 0);
for (int i = 1, iNum = iMatrixSize; i < iMatrixSize; i = 2 * i * BLOCK_SIZE)
{
int iBlockNum = (iNum + (2 * BLOCK_SIZE) - 1) / (2 * BLOCK_SIZE);
reduce4<<<iBlockNum, BLOCK_SIZE>>>(fMatrix_Device, fMatrix_Device, iNum);
iNum = iBlockNum;
}
HANDLE_ERROR(cudaMemcpy(fReuslt, fMatrix_Device, sizeof(float), cudaMemcpyDeviceToHost)); // copy the result back to the host
// stop timing
cudaEventRecord(end_GPU, 0);
cudaEventSynchronize(end_GPU);
cudaEventElapsedTime(&elaspsedTime, start_GPU, end_GPU);
cudaEventDestroy(start_GPU);
cudaEventDestroy(end_GPU);
std::cout << "Reduce4 runtime: " << elaspsedTime << "ms." << std::endl;
HANDLE_ERROR(cudaFree(fMatrix_Device));// free the device memory
return fReuslt[0];
} |
c383da5ad32171eda7d32adecf0c74308b2d0c33.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<cuda.h>
#include "kernels.h"
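// One thread per matrix row: each above-diagonal element receives its mirrored
// below-diagonal value and the source is zeroed (folds the lower triangle onto the upper).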
__global__ void per_row_kernel(int *in,int N){
int row = threadIdx.x * blockDim.y + threadIdx.y + blockIdx.x * blockDim.x * blockDim.y;
if(row<N)
{
for(int i=0;i<N;i++)
{
if(i>row)
{
in[N*row + i] = in[i*N + row];
in[i*N + row] = 0;
}
}
}
}
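// Same fold done with one thread per matrix element, using 1D thread blocks over a 3D grid.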
__global__ void per_element_kernel(int *in, int N){
long int ele = (blockIdx.x*gridDim.y+blockIdx.y)*(gridDim.z*blockDim.x)+(blockIdx.z*blockDim.x+threadIdx.x);
if(ele < N*N-1)
{
int x = ele/N;
int y = ele%N;
if(ele > x*N+x)
{
in[ele] = in[y*N+x];
in[y*N+x] = 0;
}
}
}
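// Same per-element fold, but indexed with 2D thread blocks over a 2D grid.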
__global__ void per_element_kernel_2D(int *in, int N){
long int ele = (blockIdx.x*gridDim.y+blockIdx.y)*(blockDim.x*blockDim.y)+(threadIdx.x*blockDim.y+threadIdx.y);
if(ele < N*N-1)
{
int x = ele/N;
int y = ele%N;
if(ele > x*N+x)
{
in[ele] = in[y*N+x];
in[y*N+x] = 0;
}
}
}
| c383da5ad32171eda7d32adecf0c74308b2d0c33.cu | #include<stdio.h>
#include<cuda.h>
#include "kernels.h"
__global__ void per_row_kernel(int *in,int N){
int row = threadIdx.x * blockDim.y + threadIdx.y + blockIdx.x * blockDim.x * blockDim.y;
if(row<N)
{
for(int i=0;i<N;i++)
{
if(i>row)
{
in[N*row + i] = in[i*N + row];
in[i*N + row] = 0;
}
}
}
}
__global__ void per_element_kernel(int *in, int N){
long int ele = (blockIdx.x*gridDim.y+blockIdx.y)*(gridDim.z*blockDim.x)+(blockIdx.z*blockDim.x+threadIdx.x);
if(ele < N*N-1)
{
int x = ele/N;
int y = ele%N;
if(ele > x*N+x)
{
in[ele] = in[y*N+x];
in[y*N+x] = 0;
}
}
}
__global__ void per_element_kernel_2D(int *in, int N){
long int ele = (blockIdx.x*gridDim.y+blockIdx.y)*(blockDim.x*blockDim.y)+(threadIdx.x*blockDim.y+threadIdx.y);
if(ele < N*N-1)
{
int x = ele/N;
int y = ele%N;
if(ele > x*N+x)
{
in[ele] = in[y*N+x];
in[y*N+x] = 0;
}
}
}
|
344a16ed0c7a78c64abc0c964b2c89304fc02ebc.hip | // !!! This is a file automatically generated by hipify!!!
#include "ATen/Context.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/hip/PinnedMemoryAllocator.h"
#include "ATen/hip/HIPApplyUtils.cuh"
#include "ATen/native/LinearAlgebraUtils.h"
#include "ATen/native/Gesv.h"
#include "THH.h" // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaGesvBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<>
void magmaGesvBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
magma_sgesv_batched(
n, nrhs, dA_array, ldda, dipiv_array,
dB_array, lddb, dinfo_array, batch_count, queue);
}
template<>
void magmaGesvBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
magma_dgesv_batched(
n, nrhs, dA_array, ldda, dipiv_array,
dB_array, lddb, dinfo_array, batch_count, queue);
}
static magma_queue_t createMagmaQueue(const Tensor& tensor) {
auto& context = tensor.type().get_context();
magma_queue_t magma_queue;
magma_queue_create_from_hip(
tensor.get_device(),
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
THCState_getCurrentBlasHandle(context.getTHCState()),
THCState_getCurrentSparseHandle(context.getTHCState()),
&magma_queue);
return magma_queue;
}
static inline magma_int_t magma_int_cast(int64_t value, const char* varname) {
auto result = static_cast<magma_int_t>(value);
if (static_cast<int64_t>(result) != value) {
AT_ERROR("magma: The value of %s (%lld) is too large to fit into a magma_int_t (%llu bytes)",
varname, (long long)value, sizeof(magma_int_t));
}
return result;
}
#endif
// Creates an array of size elements of type T, backed by pinned memory
// wrapped in a Storage
template<class T>
static inline std::unique_ptr<Storage> pin_memory(int64_t size, Tensor dummy) {
int64_t adjusted_size = size * sizeof(T);
auto* allocator = cuda::getPinnedMemoryAllocator();
auto& backend = dummy.type().toBackend(kCPU).toScalarType(kByte);
return backend.storageWithAllocator(adjusted_size, allocator);
}
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = reinterpret_cast<type*>(storage_##name->data());
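// Solves the batched systems A_i X_i = B_i in place with MAGMA's batched LU solver (gesv);
// per-batch LAPACK status codes are collected in infos.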
template <typename scalar_t>
static void applyGesv(Tensor& b, Tensor& A, std::vector<int64_t> infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
magmaGesvBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, createMagmaQueue(b));
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
std::tuple<Tensor,Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
std::vector<int64_t> infos(batchCount(A), 0);
auto A_working_copy = cloneBatchedColumnMajor(A);
auto b_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
applyGesv<scalar_t>(b_working_copy, A_working_copy, infos);
});
checkErrors(infos);
return std::tuple<Tensor,Tensor>(b_working_copy, A_working_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
| 344a16ed0c7a78c64abc0c964b2c89304fc02ebc.cu | #include "ATen/Context.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/Dispatch.h"
#include "ATen/NativeFunctions.h"
#include "ATen/cuda/PinnedMemoryAllocator.h"
#include "ATen/cuda/CUDAApplyUtils.cuh"
#include "ATen/native/LinearAlgebraUtils.h"
#include "ATen/native/Gesv.h"
#include "THC.h" // for USE_MAGMA
#ifdef USE_MAGMA
#include <magma.h>
#include <magma_types.h>
#endif
namespace at {
namespace native {
#ifdef USE_MAGMA
template<class scalar_t>
void magmaGesvBatched(
magma_int_t n, magma_int_t nrhs, scalar_t** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, scalar_t** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
AT_ERROR("gesv only takes float or double Tensors");
}
template<>
void magmaGesvBatched<float>(
magma_int_t n, magma_int_t nrhs, float** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, float** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
magma_sgesv_batched(
n, nrhs, dA_array, ldda, dipiv_array,
dB_array, lddb, dinfo_array, batch_count, queue);
}
template<>
void magmaGesvBatched<double>(
magma_int_t n, magma_int_t nrhs, double** dA_array, magma_int_t ldda,
magma_int_t** dipiv_array, double** dB_array, magma_int_t lddb,
magma_int_t* dinfo_array, magma_int_t batch_count, magma_queue_t queue) {
magma_dgesv_batched(
n, nrhs, dA_array, ldda, dipiv_array,
dB_array, lddb, dinfo_array, batch_count, queue);
}
static magma_queue_t createMagmaQueue(const Tensor& tensor) {
auto& context = tensor.type().get_context();
magma_queue_t magma_queue;
magma_queue_create_from_cuda(
tensor.get_device(),
at::cuda::getCurrentCUDAStream(),
THCState_getCurrentBlasHandle(context.getTHCState()),
THCState_getCurrentSparseHandle(context.getTHCState()),
&magma_queue);
return magma_queue;
}
static inline magma_int_t magma_int_cast(int64_t value, const char* varname) {
auto result = static_cast<magma_int_t>(value);
if (static_cast<int64_t>(result) != value) {
AT_ERROR("magma: The value of %s (%lld) is too large to fit into a magma_int_t (%llu bytes)",
varname, (long long)value, sizeof(magma_int_t));
}
return result;
}
#endif
// Creates an array of size elements of type T, backed by pinned memory
// wrapped in a Storage
template<class T>
static inline std::unique_ptr<Storage> pin_memory(int64_t size, Tensor dummy) {
int64_t adjusted_size = size * sizeof(T);
auto* allocator = cuda::getPinnedMemoryAllocator();
auto& backend = dummy.type().toBackend(kCPU).toScalarType(kByte);
return backend.storageWithAllocator(adjusted_size, allocator);
}
#define ALLOCATE_ARRAY(name, type, size, dummy_tensor) \
auto storage_##name = pin_memory<type>(size, dummy_tensor); \
name = reinterpret_cast<type*>(storage_##name->data());
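// Solves the batched systems A_i X_i = B_i in place with MAGMA's batched LU solver (gesv);
// per-batch LAPACK status codes are collected in infos.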
template <typename scalar_t>
static void applyGesv(Tensor& b, Tensor& A, std::vector<int64_t> infos) {
#ifndef USE_MAGMA
AT_ERROR("gesv: MAGMA library not found in "
"compilation. Please rebuild with MAGMA.");
#else
auto A_data = A.data<scalar_t>();
auto b_data = b.data<scalar_t>();
auto A_mat_stride = matrixStride(A);
auto b_mat_stride = matrixStride(b);
magma_int_t batch_size = magma_int_cast(batchCount(A), "batchCount");
magma_int_t n = magma_int_cast(A.size(-2), "A.size(-2)");
magma_int_t nrhs = magma_int_cast(b.size(-1), "b.size(-1)");
magma_int_t* info_array;
magma_int_t* ipiv_data;
magma_int_t** ipiv_array;
scalar_t** A_array;
scalar_t** b_array;
ALLOCATE_ARRAY(info_array, magma_int_t, batch_size, b);
ALLOCATE_ARRAY(ipiv_data, magma_int_t, batch_size * n, b);
ALLOCATE_ARRAY(ipiv_array, magma_int_t*, batch_size, b);
ALLOCATE_ARRAY(A_array, scalar_t*, batch_size, b);
ALLOCATE_ARRAY(b_array, scalar_t*, batch_size, b);
// Set up the created arrays
for (int64_t i = 0; i < batch_size; i++) {
A_array[i] = &A_data[i * A_mat_stride];
b_array[i] = &b_data[i * b_mat_stride];
ipiv_array[i] = &ipiv_data[i * n];
}
magmaGesvBatched<scalar_t>(
n, nrhs, A_array, n, ipiv_array, b_array, n,
info_array, batch_size, createMagmaQueue(b));
for (int64_t i = 0; i < batch_size; i++) {
infos[i] = info_array[i];
}
#endif
}
std::tuple<Tensor,Tensor> _gesv_helper_cuda(const Tensor& self, const Tensor& A) {
std::vector<int64_t> infos(batchCount(A), 0);
auto A_working_copy = cloneBatchedColumnMajor(A);
auto b_working_copy = cloneBatchedColumnMajor(self);
AT_DISPATCH_FLOATING_TYPES(self.type(), "gesv", [&]{
applyGesv<scalar_t>(b_working_copy, A_working_copy, infos);
});
checkErrors(infos);
return std::tuple<Tensor,Tensor>(b_working_copy, A_working_copy);
}
}} // namespace at::native
#undef ALLOCATE_ARRAY
|
863dffa532b2fec9dd454b5f99c72df921fbefbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "../cupmcontext.hpp" /*I "petscdevice.h" I*/
using namespace Petsc::device::cupm;
PetscErrorCode PetscDeviceContextCreate_CUDA(PetscDeviceContext dctx)
{
static constexpr auto cuda_context = CUPMContextCuda();
PetscFunctionBegin;
PetscCall(cuda_context.initialize(dctx->device));
dctx->data = new PetscDeviceContext_(CUDA);
PetscCall(PetscMemcpy(dctx->ops, &cuda_context.ops, sizeof(cuda_context.ops)));
PetscFunctionReturn(0);
}
/* Management of CUBLAS and CUSOLVER handles */
PetscErrorCode PetscCUBLASGetHandle(hipblasHandle_t *handle)
{
PetscDeviceContext dctx;
PetscFunctionBegin;
PetscValidPointer(handle, 1);
PetscCall(PetscDeviceContextGetCurrentContextAssertType_Internal(&dctx, PETSC_DEVICE_CUDA));
PetscCall(PetscDeviceContextGetBLASHandle_Internal(dctx, handle));
PetscFunctionReturn(0);
}
PetscErrorCode PetscCUSOLVERDnGetHandle(hipsolverDnHandle_t *handle)
{
PetscDeviceContext dctx;
PetscFunctionBegin;
PetscValidPointer(handle, 1);
PetscCall(PetscDeviceContextGetCurrentContextAssertType_Internal(&dctx, PETSC_DEVICE_CUDA));
PetscCall(PetscDeviceContextGetSOLVERHandle_Internal(dctx, handle));
PetscFunctionReturn(0);
}
| 863dffa532b2fec9dd454b5f99c72df921fbefbb.cu | #include "../cupmcontext.hpp" /*I "petscdevice.h" I*/
using namespace Petsc::device::cupm;
PetscErrorCode PetscDeviceContextCreate_CUDA(PetscDeviceContext dctx)
{
static constexpr auto cuda_context = CUPMContextCuda();
PetscFunctionBegin;
PetscCall(cuda_context.initialize(dctx->device));
dctx->data = new PetscDeviceContext_(CUDA);
PetscCall(PetscMemcpy(dctx->ops, &cuda_context.ops, sizeof(cuda_context.ops)));
PetscFunctionReturn(0);
}
/* Management of CUBLAS and CUSOLVER handles */
PetscErrorCode PetscCUBLASGetHandle(cublasHandle_t *handle)
{
PetscDeviceContext dctx;
PetscFunctionBegin;
PetscValidPointer(handle, 1);
PetscCall(PetscDeviceContextGetCurrentContextAssertType_Internal(&dctx, PETSC_DEVICE_CUDA));
PetscCall(PetscDeviceContextGetBLASHandle_Internal(dctx, handle));
PetscFunctionReturn(0);
}
PetscErrorCode PetscCUSOLVERDnGetHandle(cusolverDnHandle_t *handle)
{
PetscDeviceContext dctx;
PetscFunctionBegin;
PetscValidPointer(handle, 1);
PetscCall(PetscDeviceContextGetCurrentContextAssertType_Internal(&dctx, PETSC_DEVICE_CUDA));
PetscCall(PetscDeviceContextGetSOLVERHandle_Internal(dctx, handle));
PetscFunctionReturn(0);
}
|
708daa72ef222a252d54d035e0ac7a2d42c61a90.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "castingIntToFloat.h"
#include <math_functions.h>
#include "hip/device_functions.h"
#include <math.h>
#include <device_types.h>
#include <device_functions_decls.h>
#include <device_launch_parameters.h>
#include <deviceaccess.h>
#include <math_functions.h>
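// Element-wise cast kernels, one thread per element: uint->float rounds to nearest, the remaining conversions round down.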
__global__ void kernelCastingUnsignedInt2Float(float* d_out,unsigned int* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]= __uint2float_rn(d_in[i]);
}
}
__global__ void kernelCastingInt2Float(float* d_out,int* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]= __int2float_rd(d_in[i]);
}
}
__global__ void kernelCastingFloat2Int(int* d_out,float* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]=__float2int_rd(d_in[i]);
}
}
__global__ void kernelCastingFloat2UnsignedInt(unsigned int* d_out,float* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]=__float2uint_rd(d_in[i]);
}
}
| 708daa72ef222a252d54d035e0ac7a2d42c61a90.cu | #include "castingIntToFloat.h"
#include <math_functions.h>
#include "device_functions.h"
#include <math.h>
#include <device_types.h>
#include <device_functions_decls.h>
#include <device_launch_parameters.h>
#include <deviceaccess.h>
#include <math_functions.h>
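// Element-wise cast kernels, one thread per element: uint->float rounds to nearest, the remaining conversions round down.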
__global__ void kernelCastingUnsignedInt2Float(float* d_out,unsigned int* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]= __uint2float_rn(d_in[i]);
}
}
__global__ void kernelCastingInt2Float(float* d_out,int* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]= __int2float_rd(d_in[i]);
}
}
__global__ void kernelCastingFloat2Int(int* d_out,float* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]=__float2int_rd(d_in[i]);
}
}
__global__ void kernelCastingFloat2UnsignedInt(unsigned int* d_out,float* d_in,unsigned int n){
int i=blockIdx.x*blockDim.x + threadIdx.x;
if(i<n){
d_out[i]=__float2uint_rd(d_in[i]);
}
}
|
54cd5b92f2f22df1c554f6733cbe7678128aee7e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Brooke Husic and Jared Dunnmon
* Final project CME 253
* Due Feb 17 2017
*/
#include <fstream>
#include <iostream>
#include <math.h>
#include <string>
#include <vector>
#include <chrono>
#include "./debug.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (hipPeekAtLastError()) != hipSuccess ) \
{printf("Error %s at %s:%d\n", hipGetErrorString(hipGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
typedef std::chrono::high_resolution_clock Clock;
/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
std::vector<int>& prot_atomnums,
std::vector<int>& prot_resnums,
std::vector<std::vector<double>>& prot_xyz_coords){
std::ifstream f(protein_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int atomnum, resnum;
double x, y, z, occ, temp;
while (f >> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
prot_atomnums.push_back(atomnum);
prot_resnums.push_back(resnum);
prot_xyz_coords.push_back(temp_coord);
}
// some checks
if(prot_atomnums.size() != prot_resnums.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
if(prot_atomnums.size() != prot_xyz_coords.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
}
std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}
/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
std::vector<int>& lig_trajnums,
std::vector<int>& lig_atomnums,
std::vector<int>& lig_resnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::ifstream f(ligand_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int trajnum, atomnum, resnum;
double x, y, z, occ, temp;
while (f >> trajnum
>> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
lig_trajnums.push_back(trajnum);
lig_atomnums.push_back(atomnum);
lig_resnums.push_back(resnum);
lig_xyz_coords.push_back(temp_coord);
}
// some checks
if(lig_atomnums.size() != lig_trajnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_resnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_xyz_coords.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
}
std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}
/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
double dist_squared;
dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
+ (v1[1]-v2[1])*(v1[1]-v2[1])
+ (v1[2]-v2[2])*(v1[2]-v2[2]) };
return dist_squared;
}
/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
std::vector<std::vector<double>>& prot_xyz_coords,
std::vector<int>& lig_trajnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::vector<double> all_distances;
for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii],
prot_xyz_coords[jj]);
temp_dist = sqrt(temp_dist)/10.;
all_distances.push_back(temp_dist);
}
}
return all_distances;
}
/* version without SMEM
__global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
}
*/
/* cuda contact featurizer with SMEM */
__global__ void cuContactsSMEM(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
__shared__ double temp[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y+1];
if ( (pidx < plength[0]) && (lidx< llength[0])){
temp[threadIdx.x][threadIdx.y] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = temp[threadIdx.x][threadIdx.y];
}
}
int main(int argc, char *argv[])
{
if (argc != 3)
{
std::cout << "Usage:" << std::endl;
{std::cout << " " << argv[0] << " <protein input file> "
<< " <ligand input file> " << std::endl;}
return 0;
}
std::string protein_inputfile = argv[1];
std::string ligand_inputfile = argv[2];
std::vector<int> prot_atomnums;
std::vector<int> prot_resnums;
std::vector<std::vector<double>> prot_xyz_coords;
std::vector<int> lig_trajnums;
std::vector<int> lig_atomnums;
std::vector<int> lig_resnums;
std::vector<std::vector<double>> lig_xyz_coords;
ProteinSetup(protein_inputfile,
prot_atomnums,
prot_resnums,
prot_xyz_coords);
LigandTrajSetup(ligand_inputfile,
lig_trajnums,
lig_atomnums,
lig_resnums,
lig_xyz_coords);
auto cpp_start = Clock::now();
/* compute distances using cpp */
std::vector<double> distances = LPContactFeaturizer(prot_atomnums,
prot_xyz_coords,
lig_trajnums,
lig_xyz_coords);
auto cpp_end = Clock::now();
/* print out cpp time stats */
std::cout << "Number of distances to compute : " << distances.size() << std::endl;
std::cout << "Cpp distances calculated in "
<< std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
<< " microseconds" << std::endl;
double *pxyz, *lxyz, *cudists;
double *d_pxyz, *d_lxyz, *d_cudists;
int *plength, *d_plength;
int *llength, *d_llength;
int protein_size = prot_atomnums.size()*3;
int ligand_traj_size = lig_trajnums.size()*3;
int cudists_size = protein_size/3 * ligand_traj_size/3;
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( hipMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
checkCUDA( hipMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
checkCUDA( hipMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
checkCUDA( hipMalloc( (void **) &d_plength, sizeof(int) ));
checkCUDA( hipMalloc( (void **) &d_llength, sizeof(int) ));
/* allocate space for host copies of a, b, c and setup input values */
pxyz = (double *)malloc( protein_size *sizeof(double));
lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
cudists = (double *)malloc( cudists_size *sizeof(double));
plength = (int *)malloc( sizeof(int));
llength = (int *)malloc( sizeof(int));
for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
pxyz[pp*3] = prot_xyz_coords[pp][0];
pxyz[pp*3+1] = prot_xyz_coords[pp][1];
pxyz[pp*3+2] = prot_xyz_coords[pp][2];
}
for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
lxyz[ll*3] = lig_xyz_coords[ll][0];
lxyz[ll*3+1] = lig_xyz_coords[ll][1];
lxyz[ll*3+2] = lig_xyz_coords[ll][2];
}
plength[0] = prot_atomnums.size();
llength[0] = lig_trajnums.size();
/* copy inputs to device */
checkCUDA( hipMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), hipMemcpyHostToDevice ) );
checkCUDA( hipMemcpy( d_plength, plength, sizeof(int), hipMemcpyHostToDevice) );
checkCUDA( hipMemcpy( d_llength, llength, sizeof(int), hipMemcpyHostToDevice) );
/* zero out the C array */
checkCUDA( hipMemset( d_cudists, 0, cudists_size*sizeof(double) ) );
/* setup threadblock size and grid sizes*/
dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 blocks(plength[0]/threads.x+1,
llength[0]/threads.y+1,
1 );
/* check if threads and blocks are OK */
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, 0);
if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
printf("Too many threads per block \n");
}
if (threads.x > prop.maxThreadsDim[0]) {
printf("Too many threads in x-direction \n");
}
if (threads.y > prop.maxThreadsDim[1]) {
printf("Too many threads in y-direction \n");
}
if (threads.z > prop.maxThreadsDim[2]) {
printf("Too many threads in z-direction \n");
}
printf("Ready to launch kernel\n");
auto cuda_start = Clock::now();
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( cuContactsSMEM), dim3(blocks), dim3(threads) , 0, 0, d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
checkKERNEL();
auto cuda_mid = Clock::now();
/* print out CUDA time stats */
// std::cout << "CUDA distances calculated in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
// << " microseconds" << std::endl;
/* copy result back to host */
checkCUDA( hipMemcpy( cudists, d_cudists, cudists_size*sizeof(double), hipMemcpyDeviceToHost ) );
auto cuda_end = Clock::now();
// std::cout << "CUDA distances copied in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
// << " microseconds" << std::endl;
std::cout << "CUDA distances calculated in: "
<< std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
<< " microseconds" << std::endl;
/* print out distance pairs to a file */
std::ofstream f("distances.txt");
if(f.is_open()){
for(unsigned int k = 0; k < distances.size(); k++){
f << distances[k] << " " << cudists[k] << std::endl;
}
}
f.close();
free(pxyz);
free(lxyz);
free(cudists);
free(plength);
free(llength);
checkCUDA( hipFree( d_pxyz ) );
checkCUDA( hipFree( d_lxyz ) );
checkCUDA( hipFree( d_cudists ) );
checkCUDA( hipFree( d_plength ) );
checkCUDA( hipFree( d_llength ) );
checkCUDA( hipDeviceReset () );
return 0;
} /* end main */ | 54cd5b92f2f22df1c554f6733cbe7678128aee7e.cu | /* Brooke Husic and Jared Dunnmon
* Final project CME 253
* Due Feb 17 2017
*/
#include <fstream>
#include <iostream>
#include <math.h>
#include <string>
#include <vector>
#include <chrono>
#include "./debug.h"
#ifdef DEBUG
#define CUDA_CALL(F) if( (F) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__); exit(-1);}
#define CUDA_CHECK() if( (cudaPeekAtLastError()) != cudaSuccess ) \
{printf("Error %s at %s:%d\n", cudaGetErrorString(cudaGetLastError()), \
__FILE__,__LINE__-1); exit(-1);}
#else
#define CUDA_CALL(F) (F)
#define CUDA_CHECK()
#endif
#define THREADS_PER_BLOCK_X 32
#define THREADS_PER_BLOCK_Y 32
typedef std::chrono::high_resolution_clock Clock;
/* input protein file and get its xyz coordinates */
void ProteinSetup(std::string protein_inputfile,
std::vector<int>& prot_atomnums,
std::vector<int>& prot_resnums,
std::vector<std::vector<double>>& prot_xyz_coords){
std::ifstream f(protein_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int atomnum, resnum;
double x, y, z, occ, temp;
while (f >> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
prot_atomnums.push_back(atomnum);
prot_resnums.push_back(resnum);
prot_xyz_coords.push_back(temp_coord);
}
// some checks
if(prot_atomnums.size() != prot_resnums.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
if(prot_atomnums.size() != prot_xyz_coords.size()){
std::cerr << "ERROR: Problem in protein file" << std::endl;
}
}
std::cout << "Lines in protein file : " << prot_atomnums.size() << std::endl;
}
/* input ligand file and get its xyz coordinates */
void LigandTrajSetup(std::string ligand_inputfile,
std::vector<int>& lig_trajnums,
std::vector<int>& lig_atomnums,
std::vector<int>& lig_resnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::ifstream f(ligand_inputfile.c_str());
if (f.is_open()) {
std::string klass, code, resname, chain;
int trajnum, atomnum, resnum;
double x, y, z, occ, temp;
while (f >> trajnum
>> klass >> atomnum >> code >> resname
>> chain >> resnum >> x >> y >> z
>> occ >> temp){
std::vector<double> temp_coord;
temp_coord.push_back(x);
temp_coord.push_back(y);
temp_coord.push_back(z);
lig_trajnums.push_back(trajnum);
lig_atomnums.push_back(atomnum);
lig_resnums.push_back(resnum);
lig_xyz_coords.push_back(temp_coord);
}
// some checks
if(lig_atomnums.size() != lig_trajnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_resnums.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
if(lig_atomnums.size() != lig_xyz_coords.size()){
std::cerr << "ERROR: Problem in ligand file" << std::endl;
}
}
std::cout << "Lines in ligand file : " << lig_atomnums.size() << std::endl;
std::cout << "Ligand poses in file : " << lig_atomnums.size()/17 << std::endl; //all our ligands have 17 atoms
}
/* simple squared distance */
double ComputeSquaredDistance(std::vector<double> v1, std::vector<double> v2){
double dist_squared;
dist_squared = { (v1[0]-v2[0])*(v1[0]-v2[0])
+ (v1[1]-v2[1])*(v1[1]-v2[1])
+ (v1[2]-v2[2])*(v1[2]-v2[2]) };
return dist_squared;
}
/* cpp contact featurizer */
std::vector<double> LPContactFeaturizer(std::vector<int>& prot_atomnums,
std::vector<std::vector<double>>& prot_xyz_coords,
std::vector<int>& lig_trajnums,
std::vector<std::vector<double>>& lig_xyz_coords){
std::vector<double> all_distances;
for (unsigned int ii = 0; ii < lig_trajnums.size(); ii++){
for (unsigned int jj =0; jj < prot_atomnums.size(); jj++){
double temp_dist = ComputeSquaredDistance(lig_xyz_coords[ii],
prot_xyz_coords[jj]);
temp_dist = sqrt(temp_dist)/10.;
all_distances.push_back(temp_dist);
}
}
return all_distances;
}
/* version without SMEM
__global__ void cuContacts(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
}
*/
/* cuda contact featurizer with SMEM */
__global__ void cuContactsSMEM(double *pxyz, double *lxyz, double *cudists, int *plength, int *llength)
{
int pidx = threadIdx.x + blockIdx.x * blockDim.x;
int lidx = threadIdx.y + blockIdx.y * blockDim.y;
__shared__ double temp[THREADS_PER_BLOCK_X][THREADS_PER_BLOCK_Y+1];
if ( (pidx < plength[0]) && (lidx< llength[0])){
temp[threadIdx.x][threadIdx.y] = ( sqrt(
(pxyz[pidx*3]-lxyz[lidx*3])*(pxyz[pidx*3]-lxyz[lidx*3])
+ (pxyz[pidx*3+1]-lxyz[lidx*3+1])*(pxyz[pidx*3+1]-lxyz[lidx*3+1])
+ (pxyz[pidx*3+2]-lxyz[lidx*3+2])*(pxyz[pidx*3+2]-lxyz[lidx*3+2]) )/10. );
}
__syncthreads();
if ( (pidx < plength[0]) && (lidx< llength[0])){
cudists[pidx+plength[0]*lidx] = temp[threadIdx.x][threadIdx.y];
}
}
int main(int argc, char *argv[])
{
if (argc != 3)
{
std::cout << "Usage:" << std::endl;
{std::cout << " " << argv[0] << " <protein input file> "
<< " <ligand input file> " << std::endl;}
return 0;
}
std::string protein_inputfile = argv[1];
std::string ligand_inputfile = argv[2];
std::vector<int> prot_atomnums;
std::vector<int> prot_resnums;
std::vector<std::vector<double>> prot_xyz_coords;
std::vector<int> lig_trajnums;
std::vector<int> lig_atomnums;
std::vector<int> lig_resnums;
std::vector<std::vector<double>> lig_xyz_coords;
ProteinSetup(protein_inputfile,
prot_atomnums,
prot_resnums,
prot_xyz_coords);
LigandTrajSetup(ligand_inputfile,
lig_trajnums,
lig_atomnums,
lig_resnums,
lig_xyz_coords);
auto cpp_start = Clock::now();
/* compute distances using cpp */
std::vector<double> distances = LPContactFeaturizer(prot_atomnums,
prot_xyz_coords,
lig_trajnums,
lig_xyz_coords);
auto cpp_end = Clock::now();
/* print out cpp time stats */
std::cout << "Number of distances to compute : " << distances.size() << std::endl;
std::cout << "Cpp distances calculated in "
<< std::chrono::duration_cast<std::chrono::microseconds>(cpp_end - cpp_start).count()
<< " microseconds" << std::endl;
double *pxyz, *lxyz, *cudists;
double *d_pxyz, *d_lxyz, *d_cudists;
int *plength, *d_plength;
int *llength, *d_llength;
int protein_size = prot_atomnums.size()*3;
int ligand_traj_size = lig_trajnums.size()*3;
int cudists_size = protein_size/3 * ligand_traj_size/3;
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of a, b, c */
checkCUDA( cudaMalloc( (void **) &d_pxyz, protein_size*sizeof(double)) );
checkCUDA( cudaMalloc( (void **) &d_lxyz, ligand_traj_size*sizeof(double)) );
checkCUDA( cudaMalloc( (void **) &d_cudists, cudists_size*sizeof(double) ));
checkCUDA( cudaMalloc( (void **) &d_plength, sizeof(int) ));
checkCUDA( cudaMalloc( (void **) &d_llength, sizeof(int) ));
/* allocate space for host copies of a, b, c and setup input values */
pxyz = (double *)malloc( protein_size *sizeof(double));
lxyz = (double *)malloc( ligand_traj_size *sizeof(double));
cudists = (double *)malloc( cudists_size *sizeof(double));
plength = (int *)malloc( sizeof(int));
llength = (int *)malloc( sizeof(int));
for(unsigned int pp = 0; pp < prot_atomnums.size(); pp++){
pxyz[pp*3] = prot_xyz_coords[pp][0];
pxyz[pp*3+1] = prot_xyz_coords[pp][1];
pxyz[pp*3+2] = prot_xyz_coords[pp][2];
}
for(unsigned int ll = 0; ll < lig_trajnums.size(); ll++){
lxyz[ll*3] = lig_xyz_coords[ll][0];
lxyz[ll*3+1] = lig_xyz_coords[ll][1];
lxyz[ll*3+2] = lig_xyz_coords[ll][2];
}
plength[0] = prot_atomnums.size();
llength[0] = lig_trajnums.size();
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_pxyz, pxyz, protein_size*sizeof(double), cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_lxyz, lxyz, ligand_traj_size*sizeof(double), cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemcpy( d_plength, plength, sizeof(int), cudaMemcpyHostToDevice) );
checkCUDA( cudaMemcpy( d_llength, llength, sizeof(int), cudaMemcpyHostToDevice) );
/* zero out the C array */
checkCUDA( cudaMemset( d_cudists, 0, cudists_size*sizeof(double) ) );
/* setup threadblock size and grid sizes*/
dim3 threads(THREADS_PER_BLOCK_X, THREADS_PER_BLOCK_Y, 1);
dim3 blocks(plength[0]/threads.x+1,
llength[0]/threads.y+1,
1 );
/* check if threads and blocks are OK */
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, 0);
if (threads.x * threads.y * threads.z > prop.maxThreadsPerBlock) {
printf("Too many threads per block \n");
}
if (threads.x > prop.maxThreadsDim[0]) {
printf("Too many threads in x-direction \n");
}
if (threads.y > prop.maxThreadsDim[1]) {
printf("Too many threads in y-direction \n");
}
if (threads.z > prop.maxThreadsDim[2]) {
printf("Too many threads in z-direction \n");
}
printf("Ready to launch kernel\n");
auto cuda_start = Clock::now();
/* launch the kernel on the GPU */
cuContactsSMEM<<< blocks, threads >>>( d_pxyz, d_lxyz, d_cudists, d_plength, d_llength );
checkKERNEL();
auto cuda_mid = Clock::now();
/* print out CUDA time stats */
// std::cout << "CUDA distances calculated in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_mid - cuda_start).count()
// << " microseconds" << std::endl;
/* copy result back to host */
checkCUDA( cudaMemcpy( cudists, d_cudists, cudists_size*sizeof(double), cudaMemcpyDeviceToHost ) );
auto cuda_end = Clock::now();
// std::cout << "CUDA distances copied in "
// << std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_mid).count()
// << " microseconds" << std::endl;
std::cout << "CUDA distances calculated in: "
<< std::chrono::duration_cast<std::chrono::microseconds>(cuda_end - cuda_start).count()
<< " microseconds" << std::endl;
/* print out distance pairs to a file */
std::ofstream f("distances.txt");
if(f.is_open()){
for(unsigned int k = 0; k < distances.size(); k++){
f << distances[k] << " " << cudists[k] << std::endl;
}
}
f.close();
free(pxyz);
free(lxyz);
free(cudists);
free(plength);
free(llength);
checkCUDA( cudaFree( d_pxyz ) );
checkCUDA( cudaFree( d_lxyz ) );
checkCUDA( cudaFree( d_cudists ) );
checkCUDA( cudaFree( d_plength ) );
checkCUDA( cudaFree( d_llength ) );
checkCUDA( cudaDeviceReset () );
return 0;
} /* end main */ |
dc271fd6d8954443e343c70c3a84cd05e70dfaa8.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSpheropolyhedron>
template hipError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template hipError_t gpu_hpmc_update<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template hipError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template hipError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
| dc271fd6d8954443e343c70c3a84cd05e70dfaa8.cu | // Copyright (c) 2009-2019 The Regents of the University of Michigan
// This file is part of the HOOMD-blue project, released under the BSD 3-Clause License.
#include "ComputeFreeVolumeGPU.cuh"
#include "IntegratorHPMCMonoGPU.cuh"
#include "IntegratorHPMCMonoImplicitGPU.cuh"
#include "IntegratorHPMCMonoImplicitNewGPU.cuh"
#include "ShapeSpheropolyhedron.h"
#include "ShapeUnion.h"
namespace hpmc
{
namespace detail
{
//! HPMC kernels for ShapeUnion<ShapeSpheropolyhedron>
template cudaError_t gpu_hpmc_free_volume<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_free_volume_args_t &args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template cudaError_t gpu_hpmc_update<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_count_overlaps<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template cudaError_t gpu_hpmc_insert_depletants_queue<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
template cudaError_t gpu_hpmc_implicit_accept_reject_new<ShapeUnion<ShapeSpheropolyhedron> >(const hpmc_implicit_args_new_t& args,
const typename ShapeUnion<ShapeSpheropolyhedron> ::param_type *d_params);
}; // end namespace detail
} // end namespace hpmc
|
abc983e1159b328db0ad6406c290e808cbd105ac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>
#include<chrono>
using namespace std;
using namespace std::chrono;
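// Each thread sums up to 256 consecutive elements of a[] starting at its global index and writes the partial sum to b[] at that index.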
__global__ void sum_mean(int *a, int *b, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
for(int i = large_id; i < min(large_id+ 256, n); i++) {
sum += a[i];
}
b[large_id] = sum;
}
void sum_mean_cpu(int *a, int *b, int n) {
int sum = 0;
for(int i = 0; i < n; i++) {
sum += a[i];
}
b[0] = sum;
}
int main(void) {
int *a, *b, n;
int *dev_a, *dev_b;
cout<<"Enter number of elements in array: "<<endl;
cin>>n;
//a = new int[n];
//b = new int[1];
a = (int *)malloc(n * sizeof(int));
b = (int *)malloc(sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i + 1;
}
/*cout<<"The numbers stored in the array are: "<<endl;
for(int i = 0; i < n; i++) {
cout<<a[i] << " ";
}
cout<<endl;*/
hipMalloc(&dev_a, n * sizeof(int));
hipMalloc(&dev_b, sizeof(int));
hipMemcpy(dev_a, a, n * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, sizeof(int), hipMemcpyHostToDevice);
int blocks, threads;
blocks = threads = ceil(n * 1.0f/256.0f);
auto start = high_resolution_clock::now();
hipLaunchKernelGGL(( sum_mean) , dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, n);
auto stop = high_resolution_clock::now();
cout<<"For GPU: "<<endl;
hipMemcpy(b, dev_b, sizeof(int), hipMemcpyDeviceToHost);
cout<<"The sum is: " << b[0] << "\nThe mean is: " << b[0] / n << "\nThe time taken for parallel execution is: " << duration_cast<microseconds>(stop-start).count() << endl;
b[0] = 0;
start = high_resolution_clock::now();
sum_mean_cpu (a, b, n);
stop = high_resolution_clock::now();
cout<<"For CPU: "<<endl;
cout<<"The sum is: " << b[0] << "\nThe mean is: " << b[0] / n << "\nThe time taken for serial execution is: " << duration_cast<microseconds>(stop-start).count() << endl;
} | abc983e1159b328db0ad6406c290e808cbd105ac.cu | #include<iostream>
#include<chrono>
using namespace std;
using namespace std::chrono;
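// Each thread sums up to 256 consecutive elements of a[] starting at its global index and writes the partial sum to b[] at that index.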
__global__ void sum_mean(int *a, int *b, int n) {
int large_id = blockIdx.x * blockDim.x + threadIdx.x;
int sum = 0;
for(int i = large_id; i < min(large_id+ 256, n); i++) {
sum += a[i];
}
b[large_id] = sum;
}
void sum_mean_cpu(int *a, int *b, int n) {
int sum = 0;
for(int i = 0; i < n; i++) {
sum += a[i];
}
b[0] = sum;
}
int main(void) {
int *a, *b, n;
int *dev_a, *dev_b;
cout<<"Enter number of elements in array: "<<endl;
cin>>n;
//a = new int[n];
//b = new int[1];
a = (int *)malloc(n * sizeof(int));
b = (int *)malloc(sizeof(int));
for(int i = 0; i < n; i++) {
a[i] = i + 1;
}
/*cout<<"The numbers stored in the array are: "<<endl;
for(int i = 0; i < n; i++) {
cout<<a[i] << " ";
}
cout<<endl;*/
cudaMalloc(&dev_a, n * sizeof(int));
cudaMalloc(&dev_b, sizeof(int));
cudaMemcpy(dev_a, a, n * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, sizeof(int), cudaMemcpyHostToDevice);
int blocks, threads;
blocks = threads = ceil(n * 1.0f/256.0f);
auto start = high_resolution_clock::now();
sum_mean <<<blocks, threads>>> (dev_a, dev_b, n);
auto stop = high_resolution_clock::now();
cout<<"For GPU: "<<endl;
cudaMemcpy(b, dev_b, sizeof(int), cudaMemcpyDeviceToHost);
cout<<"The sum is: " << b[0] << "\nThe mean is: " << b[0] / n << "\nThe time taken for parallel execution is: " << duration_cast<microseconds>(stop-start).count() << endl;
b[0] = 0;
start = high_resolution_clock::now();
sum_mean_cpu (a, b, n);
stop = high_resolution_clock::now();
cout<<"For CPU: "<<endl;
cout<<"The sum is: " << b[0] << "\nThe mean is: " << b[0] / n << "\nThe time taken for serial execution is: " << duration_cast<microseconds>(stop-start).count() << endl;
} |
af57d74496d6c24310500914234a93552a623fee.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
// One thread per (pixel, channel): convolve the layered input image with a
// square kernel of radius kernelRadius, clamping reads at the image border.
__global__ void convoluteGPU (float *in, float *out, int w, int h, int nc, float *kernel, int kernelRadius)
{
    int ix = threadIdx.x + blockDim.x * blockIdx.x;   // x coordinate in the image
    int iy = threadIdx.y + blockDim.y * blockIdx.y;   // y coordinate in the image
    int iz = threadIdx.z + blockDim.z * blockIdx.z;   // channel index

    if (ix >= w || iy >= h || iz >= nc) return;       // guard before touching out[]

    int kernelWidth = 2 * kernelRadius + 1;
    int currentlocation = iz*w*h + ix + iy*w;         // layered layout: channel-major
    out[currentlocation] = 0;
    for (int x = 0; x < kernelWidth; x++) {
        for (int y = 0; y < kernelWidth; y++) {
            // clamp the neighbourhood coordinates to the image border
            int cx = max(min(w-1, ix + x - kernelRadius), 0);
            int cy = max(min(h-1, iy + y - kernelRadius), 0);
            out[currentlocation] += kernel[x + y*kernelWidth] * in[iz*w*h + cx + cy*w];
        }
    }
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
hipDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut_gray(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
    float *imgOutConvolutedCPU = new float[(size_t)w*h*mOut.channels()]();  // zero-initialised: the CPU convolution below accumulates with +=
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
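    // For reference, the conversion amounts to the index mapping sketched
    // below (a minimal sketch, not the exact helper.h implementation; it
    // assumes a contiguous CV_32F matrix and ignores the BGR/RGB swap):
    //
    //   for (int c = 0; c < nc; c++)
    //     for (int y = 0; y < h; y++)
    //       for (int x = 0; x < w; x++)
    //         imgIn[c*h*w + y*w + x] = ((float*)mIn.data)[(y*w + x)*nc + c];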
// ###
// ###
// ### TODO: Main computation
// ###
// ###
// create kernel
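    // The weights computed below sample the 2D Gaussian
    //   G(x,y) = exp(-(x^2 + y^2) / (2*sigma^2)) / (2*pi*sigma^2)
    // on a (2r+1) x (2r+1) grid with r = ceil(3*sigma), and are then
    // normalised so that they sum to 1; copy_gKernel only rescales the peak
    // to 1 so that the kernel can be displayed as an image.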
float sigma=1.0;
int radius_kernel=ceil(3*sigma);
int width_kernel=2*radius_kernel+1;
float sum=0.0;
float sigmasquare_x2 = 2.0 * sigma * sigma;
float gKernel[width_kernel*width_kernel];
float mKernel=0.0;
for (int x=0;x<width_kernel;x++)
{
for(int y=0;y<width_kernel;y++)
{
int a= x-radius_kernel;
int b= y-radius_kernel;
gKernel[x+y*width_kernel] = expf(-(a*a+b*b)/sigmasquare_x2)/(M_PI * sigmasquare_x2);
sum += gKernel[x+y*width_kernel];
if(gKernel[x+y*width_kernel] > mKernel){
mKernel = gKernel[x+y*width_kernel];
}
}
}
float copy_gKernel[width_kernel*width_kernel];
for(int i = 0; i < width_kernel; ++i)
{
for (int j = 0; j < width_kernel; ++j)
{
gKernel[i+j*width_kernel]/=sum;
//cout<<gKernel[i+j*width_kernel]<<"\t";
copy_gKernel[i+j*width_kernel]=gKernel[i+j*width_kernel]/gKernel[width_kernel/2+(width_kernel/2)*width_kernel];
}
//cout<<endl;
}
cv::Mat mOutKernel(width_kernel,width_kernel,CV_32FC1);
convert_layered_to_mat(mOutKernel, copy_gKernel);
showImage("Gaussian Kernel", mOutKernel, 250, 100);
// apply convolution with clamping
Timer timer; timer.start();
for(int c=0; c<nc; c++) {
for (int ix=0; ix<w; ix++) {
for(int iy=0; iy<h; iy++) {
int currentlocation = h*w*c +ix+iy*w;
for (int x=0; x<width_kernel; x++) {
for(int y=0; y<width_kernel; y++) {
//clamping strategy
int cx = max(min(w-1, ix + x - radius_kernel), 0);
int cy = max(min(h-1, iy + y - radius_kernel), 0);
imgOutConvolutedCPU[currentlocation] += gKernel[x+y*width_kernel] * imgIn[h*w*c+cx+cy*w];
//cout<<imgOutConvolutedCPU[currentlocation]<<endl;
}
}
}
}
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "time on CPU: " << t*1000 << " ms" << endl;
convert_layered_to_mat(mOut, imgOutConvolutedCPU);
showImage("Output Convoluted CPU", mOut, 150, 100);
//--Init for Cuda kernel call
float *imgOutConvolutedGPU = new float[(size_t)w*h*nc];
float *g_imgIn;
float *g_imgOut;
float *g_gKernel;
hipMalloc( &g_imgIn, w*h*nc * sizeof(float) );CUDA_CHECK;
hipMalloc( &g_imgOut, w*h*nc * sizeof(float) );CUDA_CHECK;
hipMalloc( &g_gKernel, width_kernel * width_kernel * sizeof(float) );CUDA_CHECK;
hipMemcpy( g_imgIn, imgIn, w*h*nc * sizeof(float), hipMemcpyHostToDevice );CUDA_CHECK;
hipMemcpy( g_gKernel, gKernel, width_kernel * width_kernel * sizeof(float), hipMemcpyHostToDevice );CUDA_CHECK;
dim3 Block = dim3(32,32,1);
dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (nc+ Block.z -1) / Block.z);
//call cuda kernel for convolution
Timer timer1; timer1.start();
    hipLaunchKernelGGL(convoluteGPU, dim3(Grid), dim3(Block), 0, 0, g_imgIn, g_imgOut, w, h, nc, g_gKernel, radius_kernel); CUDA_CHECK;
    hipDeviceSynchronize(); CUDA_CHECK;   // the launch is asynchronous; wait for the kernel before stopping the timer
    timer1.end(); t = timer1.get(); // elapsed time in seconds
cout << "time on GPU: " << t*1000 << " ms" << endl;
//copy output gpu->cpu
hipMemcpy(imgOutConvolutedGPU,g_imgOut, nc*h*w * sizeof(float), hipMemcpyDeviceToHost );
CUDA_CHECK;
//free gpu allocation
hipFree(g_imgOut);
CUDA_CHECK;
hipFree(g_imgIn);
CUDA_CHECK;
hipFree(g_gKernel);
CUDA_CHECK;
convert_layered_to_mat(mOut, imgOutConvolutedGPU);
showImage("Output Convoluted GPU", mOut, 200, 100);
convert_layered_to_mat(mOut, imgIn);
showImage("Input Image", mOut, 250, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
    delete[] imgIn;
    delete[] imgOut;
    delete[] imgOutConvolutedCPU;
    delete[] imgOutConvolutedGPU;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
| af57d74496d6c24310500914234a93552a623fee.cu |
// ###
// ###
// ### Practical Course: GPU Programming in Computer Vision
// ###
// ###
// ### Technical University Munich, Computer Vision Group
// ### Summer Semester 2017, September 11 - October 9
// ###
#include "helper.h"
#include <iostream>
#include <stdio.h>
using namespace std;
// uncomment to use the camera
//#define CAMERA
// One thread per (pixel, channel): convolve the layered input image with a
// square kernel of radius kernelRadius, clamping reads at the image border.
__global__ void convoluteGPU (float *in, float *out, int w, int h, int nc, float *kernel, int kernelRadius)
{
    int ix = threadIdx.x + blockDim.x * blockIdx.x;   // x coordinate in the image
    int iy = threadIdx.y + blockDim.y * blockIdx.y;   // y coordinate in the image
    int iz = threadIdx.z + blockDim.z * blockIdx.z;   // channel index

    if (ix >= w || iy >= h || iz >= nc) return;       // guard before touching out[]

    int kernelWidth = 2 * kernelRadius + 1;
    int currentlocation = iz*w*h + ix + iy*w;         // layered layout: channel-major
    out[currentlocation] = 0;
    for (int x = 0; x < kernelWidth; x++) {
        for (int y = 0; y < kernelWidth; y++) {
            // clamp the neighbourhood coordinates to the image border
            int cx = max(min(w-1, ix + x - kernelRadius), 0);
            int cy = max(min(h-1, iy + y - kernelRadius), 0);
            out[currentlocation] += kernel[x + y*kernelWidth] * in[iz*w*h + cx + cy*w];
        }
    }
}
int main(int argc, char **argv)
{
// Before the GPU can process your kernels, a so called "CUDA context" must be initialized
// This happens on the very first call to a CUDA function, and takes some time (around half a second)
// We will do it right here, so that the run time measurements are accurate
cudaDeviceSynchronize(); CUDA_CHECK;
// Reading command line parameters:
// getParam("param", var, argc, argv) looks whether "-param xyz" is specified, and if so stores the value "xyz" in "var"
// If "-param" is not specified, the value of "var" remains unchanged
//
// return value: getParam("param", ...) returns true if "-param" is specified, and false otherwise
#ifdef CAMERA
#else
// input image
string image = "";
bool ret = getParam("i", image, argc, argv);
if (!ret) cerr << "ERROR: no image specified" << endl;
if (argc <= 1) { cout << "Usage: " << argv[0] << " -i <image> [-repeats <repeats>] [-gray]" << endl; return 1; }
#endif
// number of computation repetitions to get a better run time measurement
int repeats = 1;
getParam("repeats", repeats, argc, argv);
cout << "repeats: " << repeats << endl;
// load the input image as grayscale if "-gray" is specifed
bool gray = false;
getParam("gray", gray, argc, argv);
cout << "gray: " << gray << endl;
// ### Define your own parameters here as needed
// Init camera / Load input image
#ifdef CAMERA
// Init camera
cv::VideoCapture camera(0);
if(!camera.isOpened()) { cerr << "ERROR: Could not open camera" << endl; return 1; }
int camW = 640;
int camH = 480;
camera.set(CV_CAP_PROP_FRAME_WIDTH,camW);
camera.set(CV_CAP_PROP_FRAME_HEIGHT,camH);
// read in first frame to get the dimensions
cv::Mat mIn;
camera >> mIn;
#else
// Load the input image using opencv (load as grayscale if "gray==true", otherwise as is (may be color or grayscale))
cv::Mat mIn = cv::imread(image.c_str(), (gray? CV_LOAD_IMAGE_GRAYSCALE : -1));
// check
if (mIn.data == NULL) { cerr << "ERROR: Could not load image " << image << endl; return 1; }
#endif
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
// get image dimensions
int w = mIn.cols; // width
int h = mIn.rows; // height
int nc = mIn.channels(); // number of channels
cout << "image: " << w << " x " << h << endl;
// Set the output image format
// ###
// ###
// ### TODO: Change the output image format as needed
// ###
// ###
cv::Mat mOut(h,w,mIn.type()); // mOut will have the same number of channels as the input image, nc layers
//cv::Mat mOut(h,w,CV_32FC3); // mOut will be a color image, 3 layers
//cv::Mat mOut_gray(h,w,CV_32FC1); // mOut will be a grayscale image, 1 layer
// ### Define your own output images here as needed
// Allocate arrays
// input/output image width: w
// input/output image height: h
// input image number of channels: nc
// output image number of channels: mOut.channels(), as defined above (nc, 3, or 1)
// allocate raw input image array
float *imgIn = new float[(size_t)w*h*nc];
// allocate raw output array (the computation result will be stored in this array, then later converted to mOut for displaying)
float *imgOut = new float[(size_t)w*h*mOut.channels()];
    float *imgOutConvolutedCPU = new float[(size_t)w*h*mOut.channels()]();  // zero-initialised: the CPU convolution below accumulates with +=
// For camera mode: Make a loop to read in camera frames
#ifdef CAMERA
// Read a camera image frame every 30 milliseconds:
// cv::waitKey(30) waits 30 milliseconds for a keyboard input,
// returns a value <0 if no key is pressed during this time, returns immediately with a value >=0 if a key is pressed
while (cv::waitKey(30) < 0)
{
// Get camera image
camera >> mIn;
// convert to float representation (opencv loads image values as single bytes by default)
mIn.convertTo(mIn,CV_32F);
// convert range of each channel to [0,1] (opencv default is [0,255])
mIn /= 255.f;
#endif
// Init raw input image array
// opencv images are interleaved: rgb rgb rgb... (actually bgr bgr bgr...)
// But for CUDA it's better to work with layered images: rrr... ggg... bbb...
// So we will convert as necessary, using interleaved "cv::Mat" for loading/saving/displaying, and layered "float*" for CUDA computations
convert_mat_to_layered (imgIn, mIn);
// ###
// ###
// ### TODO: Main computation
// ###
// ###
// create kernel
float sigma=1.0;
int radius_kernel=ceil(3*sigma);
int width_kernel=2*radius_kernel+1;
float sum=0.0;
float sigmasquare_x2 = 2.0 * sigma * sigma;
float gKernel[width_kernel*width_kernel];
float mKernel=0.0;
for (int x=0;x<width_kernel;x++)
{
for(int y=0;y<width_kernel;y++)
{
int a= x-radius_kernel;
int b= y-radius_kernel;
gKernel[x+y*width_kernel] = expf(-(a*a+b*b)/sigmasquare_x2)/(M_PI * sigmasquare_x2);
sum += gKernel[x+y*width_kernel];
if(gKernel[x+y*width_kernel] > mKernel){
mKernel = gKernel[x+y*width_kernel];
}
}
}
float copy_gKernel[width_kernel*width_kernel];
for(int i = 0; i < width_kernel; ++i)
{
for (int j = 0; j < width_kernel; ++j)
{
gKernel[i+j*width_kernel]/=sum;
//cout<<gKernel[i+j*width_kernel]<<"\t";
copy_gKernel[i+j*width_kernel]=gKernel[i+j*width_kernel]/gKernel[width_kernel/2+(width_kernel/2)*width_kernel];
}
//cout<<endl;
}
cv::Mat mOutKernel(width_kernel,width_kernel,CV_32FC1);
convert_layered_to_mat(mOutKernel, copy_gKernel);
showImage("Gaussian Kernel", mOutKernel, 250, 100);
// apply convolution with clamping
Timer timer; timer.start();
for(int c=0; c<nc; c++) {
for (int ix=0; ix<w; ix++) {
for(int iy=0; iy<h; iy++) {
int currentlocation = h*w*c +ix+iy*w;
for (int x=0; x<width_kernel; x++) {
for(int y=0; y<width_kernel; y++) {
//clamping strategy
int cx = max(min(w-1, ix + x - radius_kernel), 0);
int cy = max(min(h-1, iy + y - radius_kernel), 0);
imgOutConvolutedCPU[currentlocation] += gKernel[x+y*width_kernel] * imgIn[h*w*c+cx+cy*w];
//cout<<imgOutConvolutedCPU[currentlocation]<<endl;
}
}
}
}
}
timer.end(); float t = timer.get(); // elapsed time in seconds
cout << "time on CPU: " << t*1000 << " ms" << endl;
convert_layered_to_mat(mOut, imgOutConvolutedCPU);
showImage("Output Convoluted CPU", mOut, 150, 100);
//--Init for Cuda kernel call
float *imgOutConvolutedGPU = new float[(size_t)w*h*nc];
float *g_imgIn;
float *g_imgOut;
float *g_gKernel;
cudaMalloc( &g_imgIn, w*h*nc * sizeof(float) );CUDA_CHECK;
cudaMalloc( &g_imgOut, w*h*nc * sizeof(float) );CUDA_CHECK;
cudaMalloc( &g_gKernel, width_kernel * width_kernel * sizeof(float) );CUDA_CHECK;
cudaMemcpy( g_imgIn, imgIn, w*h*nc * sizeof(float), cudaMemcpyHostToDevice );CUDA_CHECK;
cudaMemcpy( g_gKernel, gKernel, width_kernel * width_kernel * sizeof(float), cudaMemcpyHostToDevice );CUDA_CHECK;
dim3 Block = dim3(32,32,1);
dim3 Grid = dim3((w +Block.x -1) / Block.x, (h + Block.y -1) / Block.y, (nc+ Block.z -1) / Block.z);
//call cuda kernel for convolution
Timer timer1; timer1.start();
    convoluteGPU <<<Grid,Block>>> (g_imgIn, g_imgOut, w, h, nc, g_gKernel, radius_kernel); CUDA_CHECK;
    cudaDeviceSynchronize(); CUDA_CHECK;   // the launch is asynchronous; wait for the kernel before stopping the timer
    timer1.end(); t = timer1.get(); // elapsed time in seconds
cout << "time on GPU: " << t*1000 << " ms" << endl;
//copy output gpu->cpu
cudaMemcpy(imgOutConvolutedGPU,g_imgOut, nc*h*w * sizeof(float), cudaMemcpyDeviceToHost );
CUDA_CHECK;
//free gpu allocation
cudaFree(g_imgOut);
CUDA_CHECK;
cudaFree(g_imgIn);
CUDA_CHECK;
cudaFree(g_gKernel);
CUDA_CHECK;
convert_layered_to_mat(mOut, imgOutConvolutedGPU);
showImage("Output Convoluted GPU", mOut, 200, 100);
convert_layered_to_mat(mOut, imgIn);
showImage("Input Image", mOut, 250, 100);
#ifdef CAMERA
// end of camera loop
}
#else
// wait for key inputs
cv::waitKey(0);
#endif
// save input and result
cv::imwrite("image_input.png",mIn*255.f); // "imwrite" assumes channel range [0,255]
cv::imwrite("image_result.png",mOut*255.f);
// free allocated arrays
    delete[] imgIn;
    delete[] imgOut;
    delete[] imgOutConvolutedCPU;
    delete[] imgOutConvolutedGPU;
// close all opencv windows
cvDestroyAllWindows();
return 0;
}
|
3c5342146758d072e510a6d4018bfcc0f3e93efb.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: April 2, 2021
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "cuda/CommonInternal.h"
#include "cuda/GridProcessingKernel.h"
#include "cuda/GridProcessingKernel3D.h"
#include "cuda/IterativeProcessingKernel.h"
#include "cuda/IterativeProcessingKernel3D.h"
#include "cuda/LevelwiseProcessingKernel.h"
#include "cuda/LinearProcessingKernel.h"
#include "cuda/LinearProcessingKernel3D.h"
#include "cuda/DataRefactoring.h"
#include <iostream>
#include <chrono>
namespace mgard_cuda {
bool store = false;  // debugging aid: have verify_matrix_cuda store each kernel's output
bool verify = false; // debugging aid: have verify_matrix_cuda check outputs against stored results
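// decompose() refactors dv in place, one level at a time. A rough sketch of
// level l, inferred from the kernel calls below (not authoritative MGARD
// documentation):
//   1. lwpk<COPY> copies the level-l data into the workspace dw.
//   2. gpk_reo / gpk_reo_3d interpolates from the coarse nodes and writes the
//      resulting coefficients back into the fine-node positions of dv.
//   3. lpk_reo_{1,2,3} (mass-matrix application) followed by ipk_{1,2,3}
//      (tridiagonal solve) run along each dimension to turn those
//      coefficients into a correction term inside dw.
//   4. lwpk<ADD> adds the correction onto the coarse nodes of dv.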
template <uint32_t D, typename T>
void decompose(Handle<D, T> &handle, T *dv, std::vector<int> ldvs,
int l_target) {
int *ldvs_h = new int[handle.D_padded];
for (int d = 0; d < handle.D_padded; d++) {
ldvs_h[d] = ldvs[d];
}
int *ldvs_d;
cudaMallocHelper((void **)&ldvs_d, handle.D_padded * sizeof(int));
cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h, handle.D_padded * sizeof(int),
H2D, 0);
std::string prefix = "decomp_";
if (sizeof(T) == sizeof(double))
prefix += "d_";
if (sizeof(T) == sizeof(float))
prefix += "f_";
for (int d = 0; d < D; d++)
prefix += std::to_string(handle.shapes_h[0][d]) + "_";
// std::cout << prefix << std::endl;
if (D <= 3) {
thrust::device_vector<int> empty_vector(0);
int unprocessed_n = 0;
int *unprocessed_dims = thrust::raw_pointer_cast(empty_vector.data());
for (int l = 0; l < l_target; ++l) {
// printf("[gpu] l = %d\n", l);
int stride = ::pow(2, l);
int Cstride = stride * 2;
int range_l = ::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = ::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
// printf("range_l: %d, range_lp1: %d\n", range_l, range_lp1);
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
// printf("input v\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
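      // dw now holds an untouched copy of the level-l data; the gpk_reo_3d
      // call below reads from dw and writes the coarse nodes plus the seven
      // coefficient sub-blocks back into dv (the dv + get_idx(...) arguments).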
// printf("before pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// printf("before pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// thrust::device_vector<int> unprocessed_dims(0);
int lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = 0; s < 1; s++) {
lddv1 *= ldvs[s];
}
for (int s = 1; s < 2; s++) {
lddv2 *= ldvs[s];
}
for (int s = 0; s < 1; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = 1; s < 2; s++) {
lddw2 *= handle.ldws_h[s];
}
T *null = NULL;
// printf("gpk_reo\n");
// gpk_reo<D, T, D, true, true, 1>(handle,
// handle.shapes_h[l], handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, ldvs_d, unprocessed_n, unprocessed_dims, 2, 1,
// 0, handle.ratio[2][l], handle.ratio[1][l],
// handle.ratio[0][l], handle.dw, handle.ldws_h[0],
// handle.ldws_h[1], dv, ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l+1]),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l+1], 0),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1], 0, 0),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs_h[0],
// ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
gpk_reo_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l + 1], 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1], 0, 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l + 1],
handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("gpk_reo\n");
// //handle.sync(0);
verify_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
handle.dofs[0][l], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "gpk_reo_3d" + "_level_" + std::to_string(l),
store, verify);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// gpk_reo<D, T, D, true, false, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after interpolate\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// gpk_reo<D, T, D, false, true, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
thrust::device_vector<int> processed_dims(0);
if (D >= 1) {
// lpk_reo_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// ldvs_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0,
// handle.dist[0][l], handle.ratio[0][l],
// dv, ldvs_h[0], ldvs_h[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dist[0][l], handle.ratio[0][l], dv,
ldvs_h[0], ldvs_h[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1], handle.dw, handle.ldws_h[0], handle.ldws_h[1],
0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_1_3d" + "_level_" + std::to_string(l), store,
verify);
processed_dims.push_back(0);
// printf("after mass_trans_multiply_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0, handle.am[0][l+1],
// handle.bm[0][l+1], handle.dist[0][l+1], handle.dw,
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.am[0][l + 1], handle.bm[0][l + 1], handle.dist[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_1_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 1) {
lwpk<D, T, ADD>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], handle.dw, handle.ldws_d, dv,
ldvs_d, 0);
// printf("after add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D >= 2) {
// lpk_reo_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0,
// handle.dist[1][l], handle.ratio[1][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, 0), handle.ldws_h[0], handle.ldws_h[1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], 0), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, 0, handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dofs[1][l + 1], handle.dist[1][l], handle.ratio[1][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0, 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_2_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// ipk_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0, handle.am[1][l+1],
// handle.bm[1][l+1], handle.dist[1][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[1][l + 1], handle.bm[1][l + 1],
handle.dist[1][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_2_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// printf("before add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
if (D == 2) {
lwpk<D, T, ADD>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// printf("after add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D == 3) {
processed_dims.push_back(1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], 2, 1, 0, handle.dist[2][l],
handle.ratio[2][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1],
handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// lpk_reo_3_3d(handle,
// handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1], handle.dofs[2][l+1],
// handle.dist[2][l], handle.ratio[2][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_3_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0, handle.am[2][l+1],
// handle.bm[2][l+1], handle.dist[2][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
ipk_3_3d(
handle, handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[2][l + 1], handle.bm[2][l + 1],
handle.dist[2][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_3_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 3) {
lwpk<D, T, ADD>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "lwpk" + "_level_" + std::to_string(l), store, verify);
}
}
} // end of loop
// printf("output of decomposition\n");
// print_matrix_cuda(handle.dofs[2][0], handle.dofs[1][0],
// handle.dofs[0][0],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
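  // For D >= 4 the dedicated 3D kernels above no longer cover every
  // dimension: the dimensions are processed in groups of up to three,
  // selected through curr_dim_f / curr_dim_c / curr_dim_r, and the extra
  // buffer db keeps a copy of the data so that the coefficient calculation
  // can still read the pre-interpolation values after dv has been
  // overwritten. (Summary of the code below.)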
if (D >= 4) {
for (int l = 0; l < l_target; ++l) {
// printf("[gpu] l = %d\n", l);
int stride = ::pow(2, l);
int Cstride = stride * 2;
int range_l = ::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = ::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
bool f_padding = handle.dofs[0][l] % 2 == 0;
bool c_padding = handle.dofs[1][l] % 2 == 0;
bool r_padding = handle.dofs[2][l] % 2 == 0;
int curr_dim_r, curr_dim_c, curr_dim_f;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
// printf("D_padded: %d\n", handle.D_padded);
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
// printf("%d %d\n", shape[d], shape_c[d]);
}
thrust::device_vector<int> unprocessed_dims;
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
// printf("input: \n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.db, handle.ldbs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// // print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// // handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0]);
// compare_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0],false);
// }
// printf("ldvs: ");
// for (int i = 0; i < D; i++) { std::cout << ldvs[i] << " ";}
// printf("\n");
// printf("ldws_h: ");
// for (int i = 0; i < D; i++) { std::cout << handle.ldws_h[i] << " ";}
// printf("\n");
// printf("lddv: %d %d lddw: %d %d\n", lddv1, lddv2, lddw1, lddw2);
// cudaMemset3DHelper(dv, ldvs[0]*sizeof(T), ldvs[0]*sizeof(T),
// ldvs[1], 0, handle.dofs[0][l]*sizeof(T),
// handle.dofs[1][l],
// handle.dofs[2][l]*handle.dofs[3][l]);
// printf("interpolate 1-3D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
gpk_reo<D, 3, T, true, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
0, handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("interpolate 4-5D\n");
curr_dim_f = 0, curr_dim_c = 3, curr_dim_r = 4;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("lddv1(%d), lddv2(%d), lddw1(%d), lddw2(%d)\n", lddv1, lddv2,
// lddw1, lddw2);
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_reo<D, 2, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_reo<D, 3, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after interpolate 4D:\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("reorder 1-3D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
lddw1 = 1, lddw2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
gpk_reo<D, 3, T, false, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, handle.ldws_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, handle.dw, lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw + get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw + get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("dv before calc\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, handle.db, handle.ldbs_d, 0);
// printf("db before calc\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.db+i*handle.ldbs_h[0]*handle.ldbs_h[1]*handle.ldbs_h[2],
// handle.ldbs_h[0], handle.ldbs_h[1],
// handle.ldbs_h[0]);
// }
// printf("calc coeff 1-5D\n");
curr_dim_f = 0, curr_dim_c = 3, curr_dim_r = 4;
lddv1 = 1, lddv2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_reo<D, 2, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_reo<D, 3, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after calc coeff 4D\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// // for (int i = 0; i < 1; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// start correction calculation
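      // For each dimension, lpk_reo_<k> applies the mass matrix to the
      // coefficients and ipk_<k> solves the corresponding tridiagonal system;
      // dw_in1 / dw_in2 / dw_out are then advanced to a new offset inside the
      // workspace dw before the next dimension is processed. (Summary of the
      // code that follows.)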
int prev_dim_r, prev_dim_c, prev_dim_f;
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
T *dw_out = handle.dw;
T *dw_in1 = dv;
T *dw_in2 =
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1]));
// printf("mass trans 1D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
thrust::device_vector<int> processed_dims;
lpk_reo_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], ldvs_d, handle.ldws_d,
handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_f][l], handle.ratio[curr_dim_f][l], dw_in1,
lddv1, lddv2,
// dv+get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
// 0, handle.dofs[0][l+1])),
dw_in2, lddv1, lddv2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 1D\n");
// ipk_1<D, T>(handle, shape, shape_c, handle.ldws_h, handle.ldws_h,
// processed_dims, curr_dim_r, curr_dim_c, curr_dim_f,
// handle.am[curr_dim_f][l+1], handle.bm[curr_dim_f][l+1],
// handle.dist[curr_dim_f][l+1], dw_out, lddw1, lddw2, 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_f][l + 1], handle.bm[curr_dim_f][l + 1],
handle.dist[curr_dim_f][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_f);
// printf("after solve_tridiag_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 2D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
0, handle.dofs[curr_dim_c][l + 1], 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, 0, handle.dofs[prev_dim_f][l + 1]));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 2D\n");
lpk_reo_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_c][l], handle.ratio[curr_dim_c][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, 0)),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], 0)),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 2D\n");
ipk_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_c][l + 1], handle.bm[curr_dim_c][l + 1],
handle.dist[curr_dim_c][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_c);
// printf("after solve_tridiag_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 3D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, handle.dofs[prev_dim_c][l + 1], 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 3D\n");
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, handle.dofs[2][l+1], 0, handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 3D\n");
ipk_3<D, T>(
handle, shape, shape_c, handle.ldws_h, handle.ldws_h, processed_dims,
curr_dim_r, curr_dim_c, curr_dim_f, handle.am[curr_dim_r][l + 1],
handle.bm[curr_dim_r][l + 1], handle.dist[curr_dim_r][l + 1], dw_out,
lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_r);
// printf("after solve_tridiag_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 4D+
for (int i = 3; i < D; i++) {
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = i;
dw_in1 = dw_out;
dw_in2 =
dw_out +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, prev_dim_r, prev_dim_c, prev_dim_f,
handle.dofs[prev_dim_r][l + 1], 0, 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans %dD\n", i+1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag %dD\n", i+1);
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(i);
}
// printf("after solve_tridiag_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// apply correction
lwpk<D, T, ADD>(handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
dw_out, handle.ldws_d, dv, ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
}
}
}
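// recompose reverses the decomposition level by level, from the coarsest level
// back to the finest: at each level it recomputes the correction (lpk_*
// mass-matrix products followed by ipk_* tridiagonal solves), subtracts it
// from the nodal values, and then undoes the interpolation/reordering with the
// gpk_rev kernels.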
template <uint32_t D, typename T>
void recompose(Handle<D, T> &handle, T *dv, std::vector<int> ldvs,
int l_target) {
// l_end=handle.l_target-4;
int *ldvs_h = new int[handle.D_padded];
for (int d = 0; d < handle.D_padded; d++) {
ldvs_h[d] = ldvs[d];
}
int *ldvs_d;
cudaMallocHelper((void **)&ldvs_d, handle.D_padded * sizeof(int));
cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h, handle.D_padded * sizeof(int),
H2D, 0);
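  // 1-3D inputs take a specialized path built on the fused 3D kernels
  // (lpk_reo_*_3d, ipk_*_3d, gpk_rev_3d); higher-dimensional inputs are
  // handled dimension by dimension in the D >= 4 branch further below.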
if (D <= 3) {
// printf("intput of recomposition\n");
// print_matrix_cuda(handle.dofs[2][0], handle.dofs[1][0],
// handle.dofs[0][0],
// dv, ldvs[0], ldvs[1], ldvs[0]);
std::string prefix = "recomp_";
if (sizeof(T) == sizeof(double))
prefix += "d_";
if (sizeof(T) == sizeof(float))
prefix += "f_";
for (int d = 0; d < D; d++)
prefix += std::to_string(handle.shapes_h[0][d]) + "_";
// std::cout << prefix << std::endl;
for (int l = l_target - 1; l >= 0; l--) {
// printf("[gpu] l = %d\n", l);
      int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
      int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
      bool f_padding = handle.dofs[0][l] % 2 == 0;
      bool c_padding = handle.dofs[1][l] % 2 == 0;
      bool r_padding = handle.dofs[2][l] % 2 == 0;
// printf("input v\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
int curr_dim_r = 2;
int curr_dim_c = 1;
int curr_dim_f = 0;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
thrust::device_vector<int> unprocessed_dims(1);
unprocessed_dims[0] = 3;
thrust::device_vector<int> processed_dims(0);
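      // Correction pass: for each dimension present, lpk_reo_k_3d applies the
      // per-dimension mass-matrix product and ipk_k_3d solves the resulting
      // tridiagonal systems; once every dimension is processed the correction
      // is subtracted from dv via lwpk<..., SUBTRACT>.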
if (D >= 1) {
// lpk_reo_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// ldvs_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0,
// handle.dist[0][l], handle.ratio[0][l],
// dv, ldvs_h[0], ldvs_h[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dist[0][l], handle.ratio[0][l], dv,
ldvs_h[0], ldvs_h[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1], handle.dw, handle.ldws_h[0], handle.ldws_h[1],
0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_1_3d" + "_level_" + std::to_string(l), store,
verify);
processed_dims.push_back(0);
// printf("after mass_trans_multiply_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0, handle.am[0][l+1],
// handle.bm[0][l+1], handle.dist[0][l+1], handle.dw,
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.am[0][l + 1], handle.bm[0][l + 1], handle.dist[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_1_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 1) {
lwpk<D, T, SUBTRACT>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], handle.dw, handle.ldws_d,
dv, ldvs_d, 0);
}
}
if (D >= 2) {
// lpk_reo_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0,
// handle.dist[1][l], handle.ratio[1][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, 0), handle.ldws_h[0], handle.ldws_h[1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], 0), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, 0, handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dofs[1][l + 1], handle.dist[1][l], handle.ratio[1][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0, 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_2_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// ipk_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0, handle.am[1][l+1],
// handle.bm[1][l+1], handle.dist[1][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[1][l + 1], handle.bm[1][l + 1],
handle.dist[1][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_2_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// printf("before sub\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
if (D == 2) {
lwpk<D, T, SUBTRACT>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// printf("after sub\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D == 3) {
processed_dims.push_back(1);
// lpk_reo_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0,
// handle.dist[2][l], handle.ratio[2][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_3_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dist[2][l],
handle.ratio[2][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1],
handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_3_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1],handle.ldws_h[0]);
// ipk_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0, handle.am[2][l+1],
// handle.bm[2][l+1], handle.dist[2][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
ipk_3_3d(
handle, handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[2][l + 1], handle.bm[2][l + 1],
handle.dist[2][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_3_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1],handle.ldws_h[0]);
if (D == 3) {
lwpk<D, T, SUBTRACT>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "lwpk" + "_level_" + std::to_string(l), store, verify);
}
}
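      // Prolongation: gpk_rev_3d recombines the coarse nodal values in dv with
      // the seven coefficient groups stored at coarse-size offsets of dv into
      // the full level-l grid in handle.dw, which is then copied back to dv.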
// printf("before prolongate_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
T *null = NULL;
// gpk_rev<D, T, D, true, true, 1>(handle,
// handle.shapes_h[l], handle.shapes_d[l],
// handle.shapes_d[l+1], handle.ldws_d, ldvs_d,
// unprocessed_dims.size(),
// thrust::raw_pointer_cast(unprocessed_dims.data()), 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
gpk_rev_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l + 1]), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l + 1], 0), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1], 0, 0), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l + 1],
handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], 0),
ldvs[0], ldvs[1],
// null,ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "gpk_rev_3d" + "_level_" + std::to_string(l), store, verify);
// gpk_rev<D, T, D, true, false, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// gpk_rev<D, T, D, false, true, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after prolongate_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
// printf("output\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
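  // For 4D and higher the same structure is applied with the generic kernels:
  // the correction is built one dimension at a time, subtracted from dv, and
  // the interpolation/reordering is then reversed in groups of at most three
  // dimensions per gpk_rev call.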
if (D >= 4) {
for (int l = l_target - 1; l >= 0; l--) {
// printf("[gpu] l = %d\n", l);
      int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
      int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
      bool f_padding = handle.dofs[0][l] % 2 == 0;
      bool c_padding = handle.dofs[1][l] % 2 == 0;
      bool r_padding = handle.dofs[2][l] % 2 == 0;
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, lddv1, lddv2, lddv1);
int curr_dim_r, curr_dim_c, curr_dim_f;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// start correction calculation
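      // Same per-dimension correction pipeline as in decompose, except that
      // the finished correction is subtracted from dv (lwpk<..., SUBTRACT>)
      // instead of added.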
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
int prev_dim_r, prev_dim_c, prev_dim_f;
T *dw_out = handle.dw;
T *dw_in1 = dv;
T *dw_in2 =
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1]));
// mass trans 1D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
thrust::device_vector<int> processed_dims;
// printf("mass trans 1D\n");
lpk_reo_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], ldvs_d, handle.ldws_d,
handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_f][l], handle.ratio[curr_dim_f][l], dw_in1,
lddv1, lddv2,
// dv+get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
// 0, handle.dofs[0][l+1])),
dw_in2, lddv1, lddv2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 1D\n");
ipk_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_f][l + 1], handle.bm[curr_dim_f][l + 1],
handle.dist[curr_dim_f][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_f);
// printf("after solve_tridiag_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 2D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
0, handle.dofs[curr_dim_c][l + 1], 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, 0, handle.dofs[prev_dim_f][l + 1]));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 2D\n");
lpk_reo_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_c][l], handle.ratio[curr_dim_c][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, 0)),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], 0)),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 2D\n");
ipk_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_c][l + 1], handle.bm[curr_dim_c][l + 1],
handle.dist[curr_dim_c][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_c);
// printf("after solve_tridiag_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 3D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, handle.dofs[prev_dim_c][l + 1], 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 3D\n");
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, handle.dofs[2][l+1], 0, handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 3D\n");
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_r);
// printf("after solve_tridiag_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 4D
for (int i = 3; i < D; i++) {
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = i;
dw_in1 = dw_out;
dw_in2 =
dw_out +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, prev_dim_r, prev_dim_c, prev_dim_f,
handle.dofs[prev_dim_r][l + 1], 0, 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans %dD\n", i+1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag %dD\n", i+1);
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(i);
}
// printf("after solve_tridiag_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// un-apply correction
lwpk<D, T, SUBTRACT>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], dw_out, handle.ldws_d, dv,
ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.db, handle.ldbs_d, 0);
// printf("interpolate 1-3D rev\n");
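      // Reverse interpolation: dims 0-2 are handled first by a 3-dimension
      // gpk_rev pass and the remaining (padded) dimensions by a second pass;
      // unprocessed_dims tells each pass which dimensions >= 3 have not been
      // handled yet, and handle.dw / handle.db serve as staging buffers.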
thrust::device_vector<int> unprocessed_dims;
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
curr_dim_r = 2, curr_dim_c = 1, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
gpk_rev<D, 3, T, true, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
          lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null,ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("interpolate 4-5D rev\n");
curr_dim_r = 4, curr_dim_c = 3, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_rev<D, 2, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_rev<D, 3, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
lwpk<D, T, COPY>(handle, shape, handle.dw, handle.ldws_h, dv, ldvs, 0);
// printf("after interpolate 4D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("reorder restore 1-3D\n");
curr_dim_r = 2, curr_dim_c = 1, curr_dim_f = 0;
lddw1 = 1, lddw2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
gpk_rev<D, 3, T, false, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, handle.db, lddb1, lddb2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null,ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, handle.db, handle.ldbs_d, 0);
// printf("reorder 1-3D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.db+i*handle.ldbs_h[0]*handle.ldbs_h[1]*handle.ldbs_h[2],
// handle.ldbs_h[0], handle.ldbs_h[1],
// handle.ldbs_h[0]);
// }
// printf("reorder restore nodal values 1-4D\n");
curr_dim_r = 4, curr_dim_c = 3, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_rev<D, 2, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], ldvs_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], dv, lddv1,
lddv2, handle.db, lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
gpk_rev<D, 3, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], ldvs_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], dv, lddv1,
lddv2, handle.db, lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after coeff restore 4D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
}
}
}
#define KERNELS(D, T) \
template void decompose<D, T>(Handle<D, T> & handle, T * dv, \
std::vector<int> ldvs, int l_target); \
template void recompose<D, T>(Handle<D, T> & handle, T * dv, \
std::vector<int> ldvs, int l_target);
KERNELS(1, double)
KERNELS(1, float)
KERNELS(2, double)
KERNELS(2, float)
KERNELS(3, double)
KERNELS(3, float)
KERNELS(4, double)
KERNELS(4, float)
KERNELS(5, double)
KERNELS(5, float)
#undef KERNELS
} // namespace mgard_cuda
| 3c5342146758d072e510a6d4018bfcc0f3e93efb.cu | /*
* Copyright 2021, Oak Ridge National Laboratory.
* MGARD-GPU: MultiGrid Adaptive Reduction of Data Accelerated by GPUs
* Author: Jieyang Chen ([email protected])
* Date: April 2, 2021
*/
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include "cuda/CommonInternal.h"
#include "cuda/GridProcessingKernel.h"
#include "cuda/GridProcessingKernel3D.h"
#include "cuda/IterativeProcessingKernel.h"
#include "cuda/IterativeProcessingKernel3D.h"
#include "cuda/LevelwiseProcessingKernel.h"
#include "cuda/LinearProcessingKernel.h"
#include "cuda/LinearProcessingKernel3D.h"
#include "cuda/DataRefactoring.h"
#include <iostream>
#include <chrono>
namespace mgard_cuda {
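// store / verify toggle the verify_matrix_cuda calls below, which dump or
// check each kernel's output against files named by the per-level prefix.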
bool store = false;
bool verify = false;
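// decompose performs the multilevel decomposition of dv (leading dimensions
// given by ldvs): at each level the data is split into coarse nodal values and
// interpolation coefficients with gpk_reo, after which a correction is
// computed with the lpk_* / ipk_* kernels and added onto the coarse part.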
template <uint32_t D, typename T>
void decompose(Handle<D, T> &handle, T *dv, std::vector<int> ldvs,
int l_target) {
int *ldvs_h = new int[handle.D_padded];
for (int d = 0; d < handle.D_padded; d++) {
ldvs_h[d] = ldvs[d];
}
int *ldvs_d;
cudaMallocHelper((void **)&ldvs_d, handle.D_padded * sizeof(int));
cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h, handle.D_padded * sizeof(int),
H2D, 0);
std::string prefix = "decomp_";
if (sizeof(T) == sizeof(double))
prefix += "d_";
if (sizeof(T) == sizeof(float))
prefix += "f_";
for (int d = 0; d < D; d++)
prefix += std::to_string(handle.shapes_h[0][d]) + "_";
// std::cout << prefix << std::endl;
if (D <= 3) {
thrust::device_vector<int> empty_vector(0);
int unprocessed_n = 0;
int *unprocessed_dims = thrust::raw_pointer_cast(empty_vector.data());
for (int l = 0; l < l_target; ++l) {
// printf("[gpu] l = %d\n", l);
int stride = std::pow(2, l);
int Cstride = stride * 2;
int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
// printf("range_l: %d, range_lp1: %d\n", range_l, range_lp1);
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
// printf("input v\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
// printf("before pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// printf("before pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// thrust::device_vector<int> unprocessed_dims(0);
int lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = 0; s < 1; s++) {
lddv1 *= ldvs[s];
}
for (int s = 1; s < 2; s++) {
lddv2 *= ldvs[s];
}
for (int s = 0; s < 1; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = 1; s < 2; s++) {
lddw2 *= handle.ldws_h[s];
}
T *null = NULL;
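      // gpk_reo_3d splits the level-l data: coarse nodal values are written in
      // place into dv and the seven groups of interpolation coefficients go to
      // the coarse-size offsets of dv; handle.dw holds the unmodified copy of
      // the level-l input made above.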
// printf("gpk_reo\n");
// gpk_reo<D, T, D, true, true, 1>(handle,
// handle.shapes_h[l], handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, ldvs_d, unprocessed_n, unprocessed_dims, 2, 1,
// 0, handle.ratio[2][l], handle.ratio[1][l],
// handle.ratio[0][l], handle.dw, handle.ldws_h[0],
// handle.ldws_h[1], dv, ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l+1]),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l+1], 0),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1], 0, 0),
// ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs_h[0], ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs_h[0],
// ldvs_h[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
gpk_reo_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l + 1], 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1], 0, 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, handle.dofs[1][l + 1],
handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], 0),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1],
// null, ldvs[0], ldvs[1],
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
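// The seven output pointers above appear to address the coefficient
// sub-blocks of dv for this level: offsets of dofs[0][l+1] along f,
// dofs[1][l+1] along c and dofs[2][l+1] along r select the {f}, {c}, {r},
// {c,f}, {r,f}, {r,c} and {r,c,f} "new node" regions, while the block at
// offset (0,0,0) keeps the coarse-node values.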
// printf("gpk_reo\n");
// //handle.sync(0);
verify_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
handle.dofs[0][l], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "gpk_reo_3d" + "_level_" + std::to_string(l),
store, verify);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// gpk_reo<D, T, D, true, false, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after interpolate\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
// gpk_reo<D, T, D, false, true, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// //null, ldvs[0], ldvs[1],
// 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after pi_Ql_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
thrust::device_vector<int> processed_dims(0);
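// Correction step (best-effort reading of the kernels below): lpk_reo_k
// applies the level-l mass-matrix / restriction along dimension k, and ipk_k
// solves the resulting tridiagonal systems along that same dimension; the
// intermediate results live in handle.dw and shrink to the coarse size
// dimension by dimension before being added to dv.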
if (D >= 1) {
// lpk_reo_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// ldvs_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0,
// handle.dist[0][l], handle.ratio[0][l],
// dv, ldvs_h[0], ldvs_h[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dist[0][l], handle.ratio[0][l], dv,
ldvs_h[0], ldvs_h[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1], handle.dw, handle.ldws_h[0], handle.ldws_h[1],
0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_1_3d" + "_level_" + std::to_string(l), store,
verify);
processed_dims.push_back(0);
// printf("after mass_trans_multiply_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0, handle.am[0][l+1],
// handle.bm[0][l+1], handle.dist[0][l+1], handle.dw,
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.am[0][l + 1], handle.bm[0][l + 1], handle.dist[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_1_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 1) {
lwpk<D, T, ADD>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], handle.dw, handle.ldws_d, dv,
ldvs_d, 0);
// printf("after add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D >= 2) {
// lpk_reo_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0,
// handle.dist[1][l], handle.ratio[1][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, 0), handle.ldws_h[0], handle.ldws_h[1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], 0), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, 0, handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dofs[1][l + 1], handle.dist[1][l], handle.ratio[1][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0, 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// //handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_2_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// ipk_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0, handle.am[1][l+1],
// handle.bm[1][l+1], handle.dist[1][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[1][l + 1], handle.bm[1][l + 1],
handle.dist[1][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_2_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// printf("before add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
if (D == 2) {
lwpk<D, T, ADD>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// printf("after add\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D == 3) {
processed_dims.push_back(1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], 2, 1, 0, handle.dist[2][l],
handle.ratio[2][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1],
handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// lpk_reo_3_3d(handle,
// handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1], handle.dofs[2][l+1],
// handle.dist[2][l], handle.ratio[2][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_3_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0, handle.am[2][l+1],
// handle.bm[2][l+1], handle.dist[2][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
ipk_3_3d(
handle, handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[2][l + 1], handle.bm[2][l + 1],
handle.dist[2][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_3_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 3) {
lwpk<D, T, ADD>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "lwpk" + "_level_" + std::to_string(l), store, verify);
}
}
} // end of loop
// printf("output of decomposition\n");
// print_matrix_cuda(handle.dofs[2][0], handle.dofs[1][0],
// handle.dofs[0][0],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
if (D >= 4) {
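// Generic (D >= 4) path. Judging from the template arguments used below,
// gpk_reo<..., INTERP, COEFF, ...> selects interpolation and/or coefficient
// calculation: two <true, false> passes interpolate along dims 0-2 and then
// dims 3-4, a <false, false> pass only reorders data, and a final
// <false, true> pass computes the coefficients against the copy stashed in
// handle.db. The correction is then built dimension by dimension as in the
// 3D branch, but with the generic lpk_reo_*/ipk_* kernels.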
for (int l = 0; l < l_target; ++l) {
// printf("[gpu] l = %d\n", l);
int stride = std::pow(2, l);
int Cstride = stride * 2;
int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
bool f_padding = handle.dofs[0][l] % 2 == 0;
bool c_padding = handle.dofs[1][l] % 2 == 0;
bool r_padding = handle.dofs[2][l] % 2 == 0;
int curr_dim_r, curr_dim_c, curr_dim_f;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
// printf("D_padded: %d\n", handle.D_padded);
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
// printf("%d %d\n", shape[d], shape_c[d]);
}
thrust::device_vector<int> unprocessed_dims;
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
// printf("input: \n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.db, handle.ldbs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// // print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// // handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0]);
// compare_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0],false);
// }
// printf("ldvs: ");
// for (int i = 0; i < D; i++) { std::cout << ldvs[i] << " ";}
// printf("\n");
// printf("ldws_h: ");
// for (int i = 0; i < D; i++) { std::cout << handle.ldws_h[i] << " ";}
// printf("\n");
// printf("lddv: %d %d lddw: %d %d\n", lddv1, lddv2, lddw1, lddw2);
// cudaMemset3DHelper(dv, ldvs[0]*sizeof(T), ldvs[0]*sizeof(T),
// ldvs[1], 0, handle.dofs[0][l]*sizeof(T),
// handle.dofs[1][l],
// handle.dofs[2][l]*handle.dofs[3][l]);
// printf("interpolate 1-3D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
gpk_reo<D, 3, T, true, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
0, handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.dw, handle.ldws_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("interpolate 4-5D\n");
curr_dim_f = 0, curr_dim_c = 3, curr_dim_r = 4;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("lddv1(%d), lddv2(%d), lddw1(%d), lddw2(%d)\n", lddv1, lddv2,
// lddw1, lddw2);
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_reo<D, 2, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_reo<D, 3, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after interpolate 4D:\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("reorder 1-3D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
lddw1 = 1, lddw2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
gpk_reo<D, 3, T, false, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, handle.ldws_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, handle.dw, lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw + get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw + get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddw1, lddw2,
// null, lddv1, lddv2,
handle.dw +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddw1, lddw2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("dv before calc\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, handle.db, handle.ldbs_d, 0);
// printf("db before calc\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.db+i*handle.ldbs_h[0]*handle.ldbs_h[1]*handle.ldbs_h[2],
// handle.ldbs_h[0], handle.ldbs_h[1],
// handle.ldbs_h[0]);
// }
// printf("calc coeff 1-5D\n");
curr_dim_f = 0, curr_dim_c = 3, curr_dim_r = 4;
lddv1 = 1, lddv2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_reo<D, 2, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_reo<D, 3, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldbs_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.db,
lddb1, lddb2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after calc coeff 4D\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// // for (int i = 0; i < 1; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// start correction calculation
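// Rolling workspace pointers for the correction cascade below: dw_in1 and
// dw_in2 point at the two input blocks (for the first dimension these are
// the coarse and coefficient blocks of dv; afterwards they are the previous
// output and the next coefficient block inside handle.dw), while dw_out is
// advanced past the region consumed by the previous step. Each lpk/ipk pair
// therefore reads two blocks and writes one block reduced to the coarse size
// (dofs[k][l+1]) along the dimension being processed.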
int prev_dim_r, prev_dim_c, prev_dim_f;
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
T *dw_out = handle.dw;
T *dw_in1 = dv;
T *dw_in2 =
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1]));
// printf("mass trans 1D\n");
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
thrust::device_vector<int> processed_dims;
lpk_reo_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], ldvs_d, handle.ldws_d,
handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_f][l], handle.ratio[curr_dim_f][l], dw_in1,
lddv1, lddv2,
// dv+get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
// 0, handle.dofs[0][l+1])),
dw_in2, lddv1, lddv2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 1D\n");
// ipk_1<D, T>(handle, shape, shape_c, handle.ldws_h, handle.ldws_h,
// processed_dims, curr_dim_r, curr_dim_c, curr_dim_f,
// handle.am[curr_dim_f][l+1], handle.bm[curr_dim_f][l+1],
// handle.dist[curr_dim_f][l+1], dw_out, lddw1, lddw2, 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_f][l + 1], handle.bm[curr_dim_f][l + 1],
handle.dist[curr_dim_f][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_f);
// printf("after solve_tridiag_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 2D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
0, handle.dofs[curr_dim_c][l + 1], 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, 0, handle.dofs[prev_dim_f][l + 1]));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 2D\n");
lpk_reo_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_c][l], handle.ratio[curr_dim_c][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, 0)),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], 0)),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 2D\n");
ipk_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_c][l + 1], handle.bm[curr_dim_c][l + 1],
handle.dist[curr_dim_c][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_c);
// printf("after solve_tridiag_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 3D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, handle.dofs[prev_dim_c][l + 1], 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 3D\n");
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, handle.dofs[2][l+1], 0, handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 3D\n");
ipk_3<D, T>(
handle, shape, shape_c, handle.ldws_h, handle.ldws_h, processed_dims,
curr_dim_r, curr_dim_c, curr_dim_f, handle.am[curr_dim_r][l + 1],
handle.bm[curr_dim_r][l + 1], handle.dist[curr_dim_r][l + 1], dw_out,
lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(curr_dim_r);
// printf("after solve_tridiag_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 4D+
for (int i = 3; i < D; i++) {
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = i;
dw_in1 = dw_out;
dw_in2 =
dw_out +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, prev_dim_r, prev_dim_c, prev_dim_f,
handle.dofs[prev_dim_r][l + 1], 0, 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans %dD\n", i+1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag %dD\n", i+1);
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
processed_dims.push_back(i);
}
// printf("after solve_tridiag_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// apply correction
lwpk<D, T, ADD>(handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
dw_out, handle.ldws_d, dv, ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
}
}
}
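// recompose() reverses decompose() level by level, walking from the coarsest
// level back to l = 0: it recomputes the same correction with the
// lpk_*/ipk_* kernels and removes it with lwpk<..., SUBTRACT>, then
// gpk_rev / gpk_rev_3d gathers the coefficient sub-blocks and undoes the
// interpolation/reordering so that dv again holds nodal values. A minimal
// usage sketch (hypothetical pseudocode; the Handle construction shown is an
// assumption, only the decompose()/recompose() signatures come from this
// file):
//
// Handle<3, float> handle(/* grid shape, ... */);
// decompose(handle, dv, ldvs, handle.l_target); // forward transform
// // ... quantize / compress the multilevel coefficients in dv ...
// recompose(handle, dv, ldvs, handle.l_target); // inverse transform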
template <uint32_t D, typename T>
void recompose(Handle<D, T> &handle, T *dv, std::vector<int> ldvs,
int l_target) {
// l_end=handle.l_target-4;
int *ldvs_h = new int[handle.D_padded];
for (int d = 0; d < handle.D_padded; d++) {
ldvs_h[d] = ldvs[d];
}
int *ldvs_d;
cudaMallocHelper((void **)&ldvs_d, handle.D_padded * sizeof(int));
cudaMemcpyAsyncHelper(handle, ldvs_d, ldvs_h, handle.D_padded * sizeof(int),
H2D, 0);
if (D <= 3) {
// printf("intput of recomposition\n");
// print_matrix_cuda(handle.dofs[2][0], handle.dofs[1][0],
// handle.dofs[0][0],
// dv, ldvs[0], ldvs[1], ldvs[0]);
std::string prefix = "recomp_";
if (sizeof(T) == sizeof(double))
prefix += "d_";
if (sizeof(T) == sizeof(float))
prefix += "f_";
for (int d = 0; d < D; d++)
prefix += std::to_string(handle.shapes_h[0][d]) + "_";
// std::cout << prefix << std::endl;
for (int l = l_target - 1; l >= 0; l--) {
// printf("[gpu] l = %d\n", l);
int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
bool f_padding = handle.dofs[0][l] % 2 == 0;
bool c_padding = handle.dofs[1][l] % 2 == 0;
bool r_padding = handle.dofs[2][l] % 2 == 0;
// printf("input v\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
int curr_dim_r = 2;
int curr_dim_c = 1;
int curr_dim_f = 0;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
thrust::device_vector<int> unprocessed_dims(1);
unprocessed_dims[0] = 3;
thrust::device_vector<int> processed_dims(0);
if (D >= 1) {
// lpk_reo_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// ldvs_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0,
// handle.dist[0][l], handle.ratio[0][l],
// dv, ldvs_h[0], ldvs_h[1],
// dv+get_idx(ldvs_h[0], ldvs_h[1], 0, 0,
// handle.dofs[0][l+1]), ldvs_h[0], ldvs_h[1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dist[0][l], handle.ratio[0][l], dv,
ldvs_h[0], ldvs_h[1],
dv + get_idx(ldvs_h[0], ldvs_h[1], 0, 0, handle.dofs[0][l + 1]),
ldvs_h[0], ldvs_h[1], handle.dw, handle.ldws_h[0], handle.ldws_h[1],
0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_1_3d" + "_level_" + std::to_string(l), store,
verify);
processed_dims.push_back(0);
// printf("after mass_trans_multiply_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
// ipk_1<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[0], handle.processed_dims_h[0],
// handle.processed_dims_d[0], 2, 1, 0, handle.am[0][l+1],
// handle.bm[0][l+1], handle.dist[0][l+1], handle.dw,
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_1_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.am[0][l + 1], handle.bm[0][l + 1], handle.dist[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dw, handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_1_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_1_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1]
// ,handle.ldws_h[0]);
if (D == 1) {
lwpk<D, T, SUBTRACT>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], handle.dw, handle.ldws_d,
dv, ldvs_d, 0);
}
}
if (D >= 2) {
// lpk_reo_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0,
// handle.dist[1][l], handle.ratio[1][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, 0), handle.ldws_h[0], handle.ldws_h[1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], 0), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, 0, handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l + 1],
handle.dofs[1][l + 1], handle.dist[1][l], handle.ratio[1][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0, 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], 0),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_2_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// ipk_2<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[1], handle.processed_dims_h[1],
// handle.processed_dims_d[1], 2, 1, 0, handle.am[1][l+1],
// handle.bm[1][l+1], handle.dist[1][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
ipk_2_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[1][l + 1], handle.bm[1][l + 1],
handle.dist[1][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_2_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_2_cpt\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1] ,handle.ldws_h[0]);
// printf("before sub\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
if (D == 2) {
lwpk<D, T, SUBTRACT>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// printf("after sub\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D == 3) {
processed_dims.push_back(1);
// lpk_reo_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0,
// handle.dist[2][l], handle.ratio[2][l],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, 0, handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1], 0,
// handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
lpk_reo_3_3d(
handle, handle.dofs[2][l], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.dofs[2][l + 1], handle.dist[2][l],
handle.ratio[2][l],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0, 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1],
handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "lpk_reo_3_3d" + "_level_" + std::to_string(l), store,
verify);
// printf("after mass_trans_multiply_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1],
// 0, handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1],handle.ldws_h[0]);
// ipk_3<D, T>(handle,
// handle.shapes_h[l], handle.shapes_h[l+1],
// handle.shapes_d[l], handle.shapes_d[l+1],
// handle.ldws_d, handle.ldws_d,
// handle.processed_n[2], handle.processed_dims_h[2],
// handle.processed_dims_d[2], 2, 1, 0, handle.am[2][l+1],
// handle.bm[2][l+1], handle.dist[2][l+1],
// handle.dw+get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.dofs[1][l+1], handle.dofs[0][l+1]),
// handle.ldws_h[0], handle.ldws_h[1], 0,
// handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
ipk_3_3d(
handle, handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], handle.am[2][l + 1], handle.bm[2][l + 1],
handle.dist[2][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], 0,
handle.auto_tuning_ts3[handle.arch][handle.precision][range_lp1]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1], handle.dofs[0][l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "ipk_3_3d" + "_level_" + std::to_string(l), store, verify);
// printf("after solve_tridiag_3_cpt\n");
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// handle.dw+get_idx(handle.ldws_h[0],
// handle.ldws_h[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), handle.ldws_h[0],
// handle.ldws_h[1],handle.ldws_h[0]);
if (D == 3) {
lwpk<D, T, SUBTRACT>(
handle, handle.shapes_h[l + 1], handle.shapes_d[l + 1],
handle.dw + get_idx(handle.ldws_h[0], handle.ldws_h[1], 0,
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
handle.ldws_d, dv, ldvs_d, 0);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l + 1], handle.dofs[1][l + 1],
handle.dofs[0][l + 1], dv, ldvs_h[0], ldvs_h[1], ldvs_h[0],
prefix + "lwpk" + "_level_" + std::to_string(l), store, verify);
}
}
// printf("before prolongate_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
T *null = NULL;
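// gpk_rev_3d below appears to be the inverse of the gpk_reo_3d call in
// decompose(): it reads the coarse block and the seven coefficient
// sub-blocks of dv and reconstructs the full level-l nodal values into the
// workspace handle.dw; the trailing 0, 0, 0 / dofs[*][l] arguments appear to
// select the full output extent.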
// gpk_rev<D, T, D, true, true, 1>(handle,
// handle.shapes_h[l], handle.shapes_d[l],
// handle.shapes_d[l+1], handle.ldws_d, ldvs_d,
// unprocessed_dims.size(),
// thrust::raw_pointer_cast(unprocessed_dims.data()), 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
gpk_rev_3d(
handle, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l],
handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l + 1]), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l + 1], 0), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1], 0, 0), ldvs[0],
ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l + 1],
handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1], 0,
handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], 0),
ldvs[0], ldvs[1],
// null,ldvs[0], ldvs[1],
dv + get_idx(ldvs[0], ldvs[1], handle.dofs[2][l + 1],
handle.dofs[1][l + 1], handle.dofs[0][l + 1]),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// handle.sync(0);
verify_matrix_cuda(
handle.dofs[2][l], handle.dofs[1][l], handle.dofs[0][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], handle.ldws_h[0],
prefix + "gpk_rev_3d" + "_level_" + std::to_string(l), store, verify);
// gpk_rev<D, T, D, true, false, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// gpk_rev<D, T, D, false, true, 1>(handle,
// shape, shape_c, handle.ldws_h, ldvs, unprocessed_dims,
// 2, 1, 0,
// handle.ratio[2][l], handle.ratio[1][l], handle.ratio[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// dv, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, 0, handle.dofs[0][l+1]),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1], 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0, 0),
// ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], 0, handle.dofs[1][l+1],
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1]), ldvs[0], ldvs[1],
// // null, ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], 0), ldvs[0], ldvs[1],
// // null,ldvs[0], ldvs[1],
// dv+get_idx(ldvs[0], ldvs[1], handle.dofs[2][l+1],
// handle.dofs[1][l+1], handle.dofs[0][l+1]), ldvs[0],
// ldvs[1],
// // null, ldvs[0], ldvs[1],
// 0, 0, 0, handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l], 0,
// handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
// printf("after prolongate_reo\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw, handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
// printf("output\n");
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, ldvs[0], ldvs[1], ldvs[0]);
}
}
if (D >= 4) {
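    // D >= 4: recompose level by level. For each level the correction is computed one
    // dimension at a time (mass transform + tridiagonal solve along f, c, r, then every
    // remaining dimension), subtracted from dv, and the interpolation (dims 0-2, then 3+)
    // and the coefficient reordering are reversed in two passes each.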
for (int l = l_target - 1; l >= 0; l--) {
// printf("[gpu] l = %d\n", l);
int range_l = std::min(6, (int)std::log2(handle.dofs[0][l]) - 1);
int range_lp1 = std::min(6, (int)std::log2(handle.dofs[0][l + 1]) - 1);
      bool f_padding = handle.dofs[0][l] % 2 == 0;
      bool c_padding = handle.dofs[1][l] % 2 == 0;
      bool r_padding = handle.dofs[2][l] % 2 == 0;
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv, lddv1, lddv2, lddv1);
int curr_dim_r, curr_dim_c, curr_dim_f;
int lddv1, lddv2;
int lddw1, lddw2;
int lddb1, lddb2;
thrust::device_vector<int> shape(handle.D_padded);
thrust::device_vector<int> shape_c(handle.D_padded);
for (int d = 0; d < handle.D_padded; d++) {
shape[d] = handle.dofs[d][l];
shape_c[d] = handle.dofs[d][l + 1];
}
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// start correction calculation
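      // The correction is accumulated in workspace buffers carved out of handle.dw:
      // for each dimension a lpk_reo_* call applies the mass transform and the matching
      // ipk_* call solves the tridiagonal system; the output of one dimension becomes
      // the input of the next.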
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
int prev_dim_r, prev_dim_c, prev_dim_f;
T *dw_out = handle.dw;
T *dw_in1 = dv;
T *dw_in2 =
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1]));
// mass trans 1D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
thrust::device_vector<int> processed_dims;
// printf("mass trans 1D\n");
lpk_reo_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], ldvs_d, handle.ldws_d,
handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_f][l], handle.ratio[curr_dim_f][l], dw_in1,
lddv1, lddv2,
// dv+get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
// 0, handle.dofs[0][l+1])),
dw_in2, lddv1, lddv2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// handle.dw+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 1D\n");
ipk_1<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[0], handle.processed_dims_h[0],
handle.processed_dims_d[0], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_f][l + 1], handle.bm[curr_dim_f][l + 1],
handle.dist[curr_dim_f][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_f);
// printf("after solve_tridiag_1\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 2D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
0, handle.dofs[curr_dim_c][l + 1], 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, 0, handle.dofs[prev_dim_f][l + 1]));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 2D\n");
lpk_reo_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_c][l], handle.ratio[curr_dim_c][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, 0)),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], 0)),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 2D\n");
ipk_2<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[1], handle.processed_dims_h[1],
handle.processed_dims_d[1], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_c][l + 1], handle.bm[curr_dim_c][l + 1],
handle.dist[curr_dim_c][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_c);
// printf("after solve_tridiag_2\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 3D
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = 2;
dw_in1 = dw_out;
dw_in2 = dw_out + get_idx(handle.ldws_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h, gen_idx(D, prev_dim_r, prev_dim_c, prev_dim_f,
0, handle.dofs[prev_dim_c][l + 1], 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans 3D\n");
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, handle.dofs[2][l+1], 0, handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r, curr_dim_c,
// curr_dim_f, 0, handle.dofs[1][l+1], handle.dofs[0][l+1])),
dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag 3D\n");
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[2], handle.processed_dims_h[2],
handle.processed_dims_d[2], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(curr_dim_r);
// printf("after solve_tridiag_3\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// mass trans 4D
for (int i = 3; i < D; i++) {
curr_dim_f = 0, curr_dim_c = 1, curr_dim_r = i;
dw_in1 = dw_out;
dw_in2 =
dw_out +
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0));
dw_out +=
get_idx(handle.ldws_h,
gen_idx(handle.D_padded, prev_dim_r, prev_dim_c, prev_dim_f,
handle.dofs[prev_dim_r][l + 1], 0, 0));
prev_dim_f = curr_dim_f;
prev_dim_c = curr_dim_c;
prev_dim_r = curr_dim_r;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
// printf("mass trans %dD\n", i+1);
lpk_reo_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.dist[curr_dim_r][l], handle.ratio[curr_dim_r][l],
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, 0, 0, handle.dofs[0][l+1])),
dw_in1, lddw1, lddw2,
// handle.dw+get_idx(handle.ldws_h, gen_idx(D, curr_dim_r,
// curr_dim_c, curr_dim_f, handle.dofs[2][l+1], 0,
// handle.dofs[0][l+1])),
dw_in2, lddw1, lddw2, dw_out, lddw1, lddw2, 0,
handle.auto_tuning_mr1[handle.arch][handle.precision][range_lp1]);
// printf("after mass_trans_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// printf("solve tridiag %dD\n", i+1);
ipk_3<D, T>(
handle, handle.shapes_h[l], handle.shapes_h[l + 1],
handle.shapes_d[l], handle.shapes_d[l + 1], handle.ldws_d,
handle.ldws_d, handle.processed_n[i], handle.processed_dims_h[i],
handle.processed_dims_d[i], curr_dim_r, curr_dim_c, curr_dim_f,
handle.am[curr_dim_r][l + 1], handle.bm[curr_dim_r][l + 1],
handle.dist[curr_dim_r][l + 1], dw_out, lddw1, lddw2, 0,
handle.auto_tuning_ts1[handle.arch][handle.precision][range_lp1]);
// processed_dims.push_back(i);
}
// printf("after solve_tridiag_4\n");
// for (int i = 0; i < handle.dofs[3][l+1]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l+1], handle.dofs[1][l+1],
// handle.dofs[0][l+1],
// dw_out+i*handle.ldws_h[0]*handle.ldws_h[1]*handle.ldws_h[2],
// handle.ldws_h[0], handle.ldws_h[1],
// handle.ldws_h[0]);
// }
// un-apply correction
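      // dw_out now holds the complete correction at the coarse-grid size; subtract it
      // from the coarse nodal values stored in dv.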
lwpk<D, T, SUBTRACT>(handle, handle.shapes_h[l + 1],
handle.shapes_d[l + 1], dw_out, handle.ldws_d, dv,
ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l], dv,
ldvs_d, handle.db, handle.ldbs_d, 0);
// printf("interpolate 1-3D rev\n");
thrust::device_vector<int> unprocessed_dims;
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
curr_dim_r = 2, curr_dim_c = 1, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
gpk_rev<D, 3, T, true, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
handle.ldws_h[0], handle.ldws_h[1], dv, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
ldvs[0], ldvs[1],
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null,ldvs[0], ldvs[1],
dv + get_idx(ldvs, gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("interpolate 4-5D rev\n");
curr_dim_r = 4, curr_dim_c = 3, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddw1 = 1, lddw2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_rev<D, 2, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
unprocessed_dims.pop_back();
unprocessed_dims.pop_back();
gpk_rev<D, 3, T, true, false, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, ldvs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, dv, lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs, gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddv1, lddv2,
// null, lddv1, lddv2,
dv + get_idx(ldvs,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddv1, lddv2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, dv, ldvs_d, 0);
lwpk<D, T, COPY>(handle, shape, handle.dw, handle.ldws_h, dv, ldvs, 0);
// printf("after interpolate 4D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
// printf("reorder restore 1-3D\n");
curr_dim_r = 2, curr_dim_c = 1, curr_dim_f = 0;
lddw1 = 1, lddw2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddw1 *= handle.ldws_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddw2 *= handle.ldws_h[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
for (int i = 3; i < D; i++)
unprocessed_dims.push_back(i);
gpk_rev<D, 3, T, false, false, 1>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], handle.ldws_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], handle.dw,
lddw1, lddw2, handle.db, lddb1, lddb2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
0, handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null,ldvs[0], ldvs[1],
handle.db + get_idx(handle.ldbs_h,
gen_idx(D, curr_dim_r, curr_dim_c, curr_dim_f,
handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, ldvs[0], ldvs[1],
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
lwpk<D, T, COPY>(handle, handle.shapes_h[l], handle.shapes_d[l],
handle.dw, handle.ldws_d, handle.db, handle.ldbs_d, 0);
// printf("reorder 1-3D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[curr_dim_r][l],
// handle.dofs[curr_dim_c][l], handle.dofs[curr_dim_f][l],
// handle.db+i*handle.ldbs_h[0]*handle.ldbs_h[1]*handle.ldbs_h[2],
// handle.ldbs_h[0], handle.ldbs_h[1],
// handle.ldbs_h[0]);
// }
// printf("reorder restore nodal values 1-4D\n");
curr_dim_r = 4, curr_dim_c = 3, curr_dim_f = 0;
lddv1 = 1, lddv2 = 1, lddb1 = 1, lddb2 = 1;
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddv1 *= ldvs[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddv2 *= ldvs[s];
}
for (int s = curr_dim_f; s < curr_dim_c; s++) {
lddb1 *= handle.ldbs_h[s];
}
for (int s = curr_dim_c; s < curr_dim_r; s++) {
lddb2 *= handle.ldbs_h[s];
}
if (D % 2 == 0) {
unprocessed_dims.pop_back();
gpk_rev<D, 2, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], ldvs_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], dv, lddv1,
lddv2, handle.db, lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
} else {
gpk_rev<D, 3, T, false, true, 2>(
handle, handle.shapes_h[l], handle.shapes_d[l],
handle.shapes_d[l + 1], ldvs_d, handle.ldbs_d,
unprocessed_dims.size(),
thrust::raw_pointer_cast(unprocessed_dims.data()), curr_dim_r,
curr_dim_c, curr_dim_f, handle.ratio[curr_dim_r][l],
handle.ratio[curr_dim_c][l], handle.ratio[curr_dim_f][l], dv, lddv1,
lddv2, handle.db, lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h, gen_idx(handle.D_padded, curr_dim_r,
curr_dim_c, curr_dim_f, 0, 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0,
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db + get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f,
handle.dofs[curr_dim_r][l + 1], 0, 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, 0, handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1], 0,
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1], 0)),
lddb1, lddb2,
// null, lddv1, lddv2,
handle.db +
get_idx(handle.ldbs_h,
gen_idx(handle.D_padded, curr_dim_r, curr_dim_c,
curr_dim_f, handle.dofs[curr_dim_r][l + 1],
handle.dofs[curr_dim_c][l + 1],
handle.dofs[curr_dim_f][l + 1])),
lddb1, lddb2,
// null, lddv1, lddv2,
0, 0, 0, handle.dofs[curr_dim_r][l], handle.dofs[curr_dim_c][l],
handle.dofs[curr_dim_f][l], 0,
handle.auto_tuning_cc[handle.arch][handle.precision][range_l]);
}
// printf("after coeff restore 4D rev\n");
// for (int i = 0; i < handle.dofs[3][l]; i++) {
// printf("i = %d\n", i);
// print_matrix_cuda(handle.dofs[2][l], handle.dofs[1][l],
// handle.dofs[0][l],
// dv+i*ldvs[0]*ldvs[1]*ldvs[2], ldvs[0], ldvs[1],
// ldvs[0]);
// }
}
}
}
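// Explicit instantiations of decompose/recompose for 1-D through 5-D grids in
// single and double precision.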
#define KERNELS(D, T) \
template void decompose<D, T>(Handle<D, T> & handle, T * dv, \
std::vector<int> ldvs, int l_target); \
template void recompose<D, T>(Handle<D, T> & handle, T * dv, \
std::vector<int> ldvs, int l_target);
KERNELS(1, double)
KERNELS(1, float)
KERNELS(2, double)
KERNELS(2, float)
KERNELS(3, double)
KERNELS(3, float)
KERNELS(4, double)
KERNELS(4, float)
KERNELS(5, double)
KERNELS(5, float)
#undef KERNELS
} // namespace mgard_cuda
|
dfaf19f0318f5c343c7386f4bf6353a92508ca36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Device code.
*/
#ifndef _VSMOOTH_KERNEL_H_
#define _VSMOOTH_KERNEL_H_
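/* The kernels below rely on filter/tile macros that are assumed to come from a
 * companion host header (not part of this file). The guarded values here are
 * purely illustrative fallbacks so the file is self-contained; the real project
 * values may differ.
 */
#ifndef FILTER5_RAD
#define FILTER5_RAD 3             /* assumed: radius of the 7-tap [1 1 1 2 1 1 1] filter */
#endif
#ifndef FILTER_RAD_ALIGNED
#define FILTER_RAD_ALIGNED 16     /* assumed: radius rounded up to a half-warp for aligned loads */
#endif
#ifndef XSMOOTHV_TILE_WIDTH
#define XSMOOTHV_TILE_WIDTH 128   /* assumed: row-tile width for the tiled x kernel */
#endif
#ifndef YSMOOTHV_TILE_WIDTH
#define YSMOOTHV_TILE_WIDTH 16    /* assumed: column-tile width for the y kernel */
#endif
#ifndef YSMOOTHV_TILE_HEIGHT
#define YSMOOTHV_TILE_HEIGHT 48   /* assumed: column-tile height for the y kernel */
#endif
#ifndef IMUL
#define IMUL(a, b) ((a) * (b))    /* assumed: integer multiply helper */
#endif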
/**********************************************************************
* smooth_velocity_x()
*
* This kernel calculates the convolution for an image in the x direction
* using a [1 1 1 2 1 1 1] filter. Non-tiled - works best if width is
* multiple of 16 or 32. Only used when width <= 512, the max number
* of threads.
*
***********************************************************************/
extern __shared__ float sharedMemSV[];
__global__ void
smooth_velocity_x(float *d_Result, float *d_Data, int width, int height, int pitch) {
int I = threadIdx.x;
int B = blockIdx.x;
float* sequence = sharedMemSV;
sequence[I] = d_Data[B*width + I];
__syncthreads();
float resultX = 0.0;
if( !(I < 3 || I >= width-3) && !(B < 3 || B >= height-3) ) {
//calculate gradient in the x direction
resultX = .125*sequence[I-3] + .125*sequence[I-2] + .125*sequence[I-1] + .25*sequence[I] + .125*sequence[I+1] + .125*sequence[I+2] + .125*sequence[I+3];
}
d_Result[B*width + I] = resultX;
}
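/* A minimal host-side launch sketch (an assumption, not from the original source):
 * smooth_velocity_x expects one block per image row, one thread per pixel, and a
 * dynamic shared-memory row buffer of width floats, which is why it is restricted
 * to width <= the maximum block size. */
static void launch_smooth_velocity_x(float *d_Result, float *d_Data,
                                     int width, int height, int pitch) {
    // grid = one block per row, block = one thread per column
    hipLaunchKernelGGL(smooth_velocity_x, dim3(height), dim3(width),
                       width * sizeof(float), 0,
                       d_Result, d_Data, width, height, pitch);
}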
/**********************************************************************
* smooth_velocity_x_t()
*
* This kernel calculates the convolution for an image in the x direction
* using a [1 1 1 2 1 1 1] filter. Tiled - works for any image size. A
* bit slower than non-tiled if width < 512 and is a multiple of 16.
*
***********************************************************************/
__global__ void
smooth_velocity_x_t(float *d_Result, float *d_Data, int width, int height, int pitch) {
__shared__ float data[FILTER5_RAD + XSMOOTHV_TILE_WIDTH + FILTER5_RAD];
//Current tile and apron limits, relative to row start
const int tileStart = IMUL(blockIdx.x, XSMOOTHV_TILE_WIDTH);
const int tileEnd = tileStart + XSMOOTHV_TILE_WIDTH - 1;
const int apronStart = tileStart - FILTER5_RAD;
const int apronEnd = tileEnd + FILTER5_RAD;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, width - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, width - 1);
//Row start index in d_Data[]
const int rowStart = IMUL(blockIdx.y, pitch);
const int apronStartAligned = tileStart - FILTER_RAD_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming width and XSMOOTHV_TILE_WIDTH are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
if(!(writePos<FILTER5_RAD || writePos>=width-FILTER5_RAD || blockIdx.y<FILTER5_RAD || blockIdx.y>=height-FILTER5_RAD))
sum = .125*data[smemPos-3] + .125*data[smemPos-2] + .125*data[smemPos-1] + .25*data[smemPos] + .125*data[smemPos+1] + .125*data[smemPos+2] + .125*data[smemPos+3];
d_Result[rowStart + writePos] = sum;
}
}
/**********************************************************************
* smooth_velocity_y()
*
* This kernel calculates the convolution for an image in the y direction
* using a [1 1 1 2 1 1 1] filter.
*
***********************************************************************/
__global__ void
smooth_velocity_y(float *d_Result, float *d_Data, int image_width, int image_height, int pitch) {
__shared__ float data[YSMOOTHV_TILE_WIDTH * (FILTER5_RAD + YSMOOTHV_TILE_HEIGHT + FILTER5_RAD)];
//Current tile and apron limits, in rows
const int tileStart = IMUL(blockIdx.y, YSMOOTHV_TILE_HEIGHT);
const int tileEnd = tileStart + YSMOOTHV_TILE_HEIGHT - 1;
const int apronStart = tileStart - FILTER5_RAD;
const int apronEnd = tileEnd + FILTER5_RAD;
//Clamp tile and apron limits by image borders
// const int tileEndClamped = min(tileEnd, height - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, image_height - 1);
//Current column index
const int columnStart = IMUL(blockIdx.x, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
if(columnStart < image_width) {
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, pitch) + columnStart;
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
data[smemPos] = ((apronStart + threadIdx.y >= apronStartClamped) && (apronStart + threadIdx.y <= apronEndClamped)) ? d_Data[gmemPos] : 0;
}
__syncthreads();
if(columnStart < image_width && threadIdx.y < YSMOOTHV_TILE_HEIGHT && tileStart + threadIdx.y < image_height) {
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y + FILTER5_RAD, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(tileStart + threadIdx.y, pitch) + columnStart;
float sum = 0;
if(!( ( columnStart < FILTER5_RAD ) || ( columnStart >= (image_width-FILTER5_RAD) ) || ( (tileStart + threadIdx.y) < FILTER5_RAD) || ( (tileStart + threadIdx.y) >= (image_height-FILTER5_RAD) ) ))
sum = .125*data[smemPos-3*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos-2*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos-1*YSMOOTHV_TILE_WIDTH] + .25*data[smemPos] + .125*data[smemPos+1*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos+2*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos+3*YSMOOTHV_TILE_WIDTH];
d_Result[gmemPos] = sum;
}
}
#endif // #ifndef _VSMOOTH_KERNEL_H_
| dfaf19f0318f5c343c7386f4bf6353a92508ca36.cu |
/*
* Device code.
*/
#ifndef _VSMOOTH_KERNEL_H_
#define _VSMOOTH_KERNEL_H_
/**********************************************************************
* smooth_velocity_x()
*
* This kernel calculates the convolution for an image in the x direction
* using a [1 1 1 2 1 1 1] filter. Non-tiled - works best if width is
* multiple of 16 or 32. Only used when width <= 512, the max number
* of threads.
*
***********************************************************************/
extern __shared__ float sharedMemSV[];
__global__ void
smooth_velocity_x(float *d_Result, float *d_Data, int width, int height, int pitch) {
int I = threadIdx.x;
int B = blockIdx.x;
float* sequence = sharedMemSV;
sequence[I] = d_Data[B*width + I];
__syncthreads();
float resultX = 0.0;
if( !(I < 3 || I >= width-3) && !(B < 3 || B >= height-3) ) {
//calculate gradient in the x direction
resultX = .125*sequence[I-3] + .125*sequence[I-2] + .125*sequence[I-1] + .25*sequence[I] + .125*sequence[I+1] + .125*sequence[I+2] + .125*sequence[I+3];
}
d_Result[B*width + I] = resultX;
}
/**********************************************************************
* smooth_velocity_x_t()
*
* This kernel calculates the convolution for an image in the x direction
* using a [1 1 1 2 1 1 1] filter. Tiled - works for any image size. A
* bit slower than non-tiled if width < 512 and is a multiple of 16.
*
***********************************************************************/
__global__ void
smooth_velocity_x_t(float *d_Result, float *d_Data, int width, int height, int pitch) {
__shared__ float data[FILTER5_RAD + XSMOOTHV_TILE_WIDTH + FILTER5_RAD];
//Current tile and apron limits, relative to row start
const int tileStart = IMUL(blockIdx.x, XSMOOTHV_TILE_WIDTH);
const int tileEnd = tileStart + XSMOOTHV_TILE_WIDTH - 1;
const int apronStart = tileStart - FILTER5_RAD;
const int apronEnd = tileEnd + FILTER5_RAD;
//Clamp tile and apron limits by image borders
const int tileEndClamped = min(tileEnd, width - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, width - 1);
//Row start index in d_Data[]
const int rowStart = IMUL(blockIdx.y, pitch);
const int apronStartAligned = tileStart - FILTER_RAD_ALIGNED;
const int loadPos = apronStartAligned + threadIdx.x;
//Set the entire data cache contents
//Load global memory values, if indices are within the image borders,
//or initialize with zeroes otherwise
if(loadPos >= apronStart){
const int smemPos = loadPos - apronStart;
data[smemPos] =
((loadPos >= apronStartClamped) && (loadPos <= apronEndClamped)) ?
d_Data[rowStart + loadPos] : 0;
}
__syncthreads();
const int writePos = tileStart + threadIdx.x;
//Assuming width and XSMOOTHV_TILE_WIDTH are multiples of half-warp size,
//rowStart + tileStart is also a multiple of half-warp size,
//thus having proper alignment for coalesced d_Result[] write.
if(writePos <= tileEndClamped){
const int smemPos = writePos - apronStart;
float sum = 0;
if(!(writePos<FILTER5_RAD || writePos>=width-FILTER5_RAD || blockIdx.y<FILTER5_RAD || blockIdx.y>=height-FILTER5_RAD))
sum = .125*data[smemPos-3] + .125*data[smemPos-2] + .125*data[smemPos-1] + .25*data[smemPos] + .125*data[smemPos+1] + .125*data[smemPos+2] + .125*data[smemPos+3];
d_Result[rowStart + writePos] = sum;
}
}
/**********************************************************************
* smooth_velocity_y()
*
* This kernel calculates the convolution for an image in the y direction
* using a [1 1 1 2 1 1 1] filter.
*
***********************************************************************/
__global__ void
smooth_velocity_y(float *d_Result, float *d_Data, int image_width, int image_height, int pitch) {
__shared__ float data[YSMOOTHV_TILE_WIDTH * (FILTER5_RAD + YSMOOTHV_TILE_HEIGHT + FILTER5_RAD)];
//Current tile and apron limits, in rows
const int tileStart = IMUL(blockIdx.y, YSMOOTHV_TILE_HEIGHT);
const int tileEnd = tileStart + YSMOOTHV_TILE_HEIGHT - 1;
const int apronStart = tileStart - FILTER5_RAD;
const int apronEnd = tileEnd + FILTER5_RAD;
//Clamp tile and apron limits by image borders
// const int tileEndClamped = min(tileEnd, height - 1);
const int apronStartClamped = max(apronStart, 0);
const int apronEndClamped = min(apronEnd, image_height - 1);
//Current column index
const int columnStart = IMUL(blockIdx.x, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
if(columnStart < image_width) {
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(apronStart + threadIdx.y, pitch) + columnStart;
//Load global memory values, if indices are within the image borders,
//or initialize with zero otherwise
data[smemPos] = ((apronStart + threadIdx.y >= apronStartClamped) && (apronStart + threadIdx.y <= apronEndClamped)) ? d_Data[gmemPos] : 0;
}
__syncthreads();
if(columnStart < image_width && threadIdx.y < YSMOOTHV_TILE_HEIGHT && tileStart + threadIdx.y < image_height) {
//Shared and global memory indices for current column
int smemPos = IMUL(threadIdx.y + FILTER5_RAD, YSMOOTHV_TILE_WIDTH) + threadIdx.x;
int gmemPos = IMUL(tileStart + threadIdx.y, pitch) + columnStart;
float sum = 0;
if(!( ( columnStart < FILTER5_RAD ) || ( columnStart >= (image_width-FILTER5_RAD) ) || ( (tileStart + threadIdx.y) < FILTER5_RAD) || ( (tileStart + threadIdx.y) >= (image_height-FILTER5_RAD) ) ))
sum = .125*data[smemPos-3*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos-2*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos-1*YSMOOTHV_TILE_WIDTH] + .25*data[smemPos] + .125*data[smemPos+1*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos+2*YSMOOTHV_TILE_WIDTH] + .125*data[smemPos+3*YSMOOTHV_TILE_WIDTH];
d_Result[gmemPos] = sum;
}
}
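/* A minimal launch-geometry sketch (an assumption, not from the original source):
 * each block computes a YSMOOTHV_TILE_WIDTH x YSMOOTHV_TILE_HEIGHT output tile and
 * carries 2*FILTER5_RAD extra thread rows so the apron rows above and below the
 * tile are loaded into shared memory as well. */
static void launch_smooth_velocity_y(float *d_Result, float *d_Data,
                                     int image_width, int image_height, int pitch) {
    dim3 block(YSMOOTHV_TILE_WIDTH, YSMOOTHV_TILE_HEIGHT + 2 * FILTER5_RAD);
    dim3 grid((image_width  + YSMOOTHV_TILE_WIDTH  - 1) / YSMOOTHV_TILE_WIDTH,
              (image_height + YSMOOTHV_TILE_HEIGHT - 1) / YSMOOTHV_TILE_HEIGHT);
    smooth_velocity_y<<<grid, block>>>(d_Result, d_Data, image_width, image_height, pitch);
}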
#endif // #ifndef _VSMOOTH_KERNEL_H_
|
6a34a9e7d19dfc8b84854cd659f884dee0b4791f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "vec_cospif.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
hipMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(vec_cospif, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
hipDeviceSynchronize();
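// warm up with 10 untimed launches, then time 1000 back-to-back launches; there is
// no device synchronize before the end timestamp, so some of the queued GPU work may
// still be in flight when the time is read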
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(vec_cospif, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(vec_cospif, dim3(gridBlock), dim3(threadBlock), 0, 0, n, result, x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
hipFree(result);
hipFree(x);
}
}} | 6a34a9e7d19dfc8b84854cd659f884dee0b4791f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "vec_cospif.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
size_t n = XSIZE*YSIZE;
float *result = NULL;
cudaMalloc(&result, XSIZE*YSIZE*sizeof(float));
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
vec_cospif<<<gridBlock,threadBlock>>>(n,result,x);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
vec_cospif<<<gridBlock,threadBlock>>>(n,result,x);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
vec_cospif<<<gridBlock,threadBlock>>>(n,result,x);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
cudaFree(result);
cudaFree(x);
}
}} |
033a711e11cbda0b531a2d4079977edcafe9e48b.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <ctime>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <hip/hip_runtime.h>
using namespace std;
void KNearestNeighborsCPU(float3 *dataArray, int *result, int cnt);
__global__ void KNearestNeighborsGPU(float3 *dataArray, int *result, int cnt);
int cnt = 10000;
int main(int argc, char **argv)
{
clock_t init, end;
// generate the data
srand(time(NULL));
int timt = 0;
float3 *dataArray = new float3[cnt];
int *result = new int[cnt];
for (int i = 0; i < cnt; i++)
{
dataArray[i].x = i;
dataArray[i].y = i;
dataArray[i].z = i;
}
	// first, measure how long the algorithm takes on the CPU
for (int i = 0; i < 10; i++)
{
init = clock();
KNearestNeighborsCPU(dataArray, result, cnt);
end = clock();
timt += end - init;
cout << "Iteration number " << i << " took " << end - init << " milliseconds" << endl;
}
cout << "[+] The algorithm on the CPU takes " << timt / 10 << " milliseconds" << endl;
timt = 0;
for (int i = 0; i < 10; i++)
cout << i << " - " << result[i] << endl;
	memset(result, 0, cnt * sizeof(int));
// allocate and copy memory to the gpu
float3 *deviceData;
int *deviceResult;
if (hipMalloc(&deviceData, sizeof(float3)*cnt) != hipSuccess)
{
cout << "[+] Unable to allocate GPU memory" << endl;
return -1;
}
if (hipMalloc(&deviceResult, sizeof(int)*cnt) != hipSuccess)
{
cout << "[+] Unable to allocate GPU memory" << endl;
return -2;
}
if (hipMemcpy(deviceData, dataArray, sizeof(float3)*cnt, hipMemcpyHostToDevice) != hipSuccess)
{
cout << "[+] Error in moving memory to the GPU" << endl;
return -3;
}
	// then measure the speed of the simple brute-force implementation on the GPU
for (int i = 0; i < 10; i++)
{
init = clock();
		hipLaunchKernelGGL(KNearestNeighborsGPU, dim3((cnt / 128) + 1), dim3(128), 0, 0, deviceData, deviceResult, cnt);
hipMemcpy(result, deviceResult, sizeof(int)*cnt, hipMemcpyDeviceToHost);
end = clock();
timt += end - init;
cout << "Iteration number " << i << " took " << end - init << " milliseconds" << endl;
}
cout << "[+] The algorithm on the GPU takes " << timt / 10 << " milliseconds" << endl;
timt = 0;
for (int i = 0; i < 10; i++)
cout << i << " - " << result[i] << endl;
return 0;
}
// cpu algorithm
void KNearestNeighborsCPU(float3 *dataArray, int *result, int cnt)
{
for (int i = 0; i < cnt; i++)
{
float minimumDist = 3.4028234664e38f, distance = 0;
for (int j = 0; j < cnt; j++)
{
if (i != j)
{
distance = (dataArray[i].x - dataArray[j].x) * (dataArray[i].x - dataArray[j].x);
distance += (dataArray[i].y - dataArray[j].y) * (dataArray[i].y - dataArray[j].y);
distance += (dataArray[i].z - dataArray[j].z) * (dataArray[i].z - dataArray[j].z);
if (distance < minimumDist)
{
minimumDist = distance;
result[i] = j;
}
}
}
}
}
// gpu algorithm
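// brute-force 1-nearest-neighbour search: each thread owns one query point and scans
// every other point, keeping the index of the smallest squared distance
// (O(n^2) work in total, no shared-memory tiling)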
__global__ void KNearestNeighborsGPU(float3 *dataArray, int *result, int cnt)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= cnt) return;
float3 point = dataArray[id], current;
float minimumDist = 3.4028234664e38f, distance = 0;
for (int j = 0; j < cnt; j++)
{
if (id == j) continue;
current = dataArray[j];
distance = (point.x - current.x) * (point.x - current.x);
distance += (point.y - current.y) * (point.y - current.y);
distance += (point.z - current.z) * (point.z - current.z);
if (distance < minimumDist)
{
minimumDist = distance;
result[id] = j;
}
}
} | 033a711e11cbda0b531a2d4079977edcafe9e48b.cu | #include <iostream>
#include <ctime>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <cuda.h>
using namespace std;
void KNearestNeighborsCPU(float3 *dataArray, int *result, int cnt);
__global__ void KNearestNeighborsGPU(float3 *dataArray, int *result, int cnt);
int cnt = 10000;
int main(int argc, char **argv)
{
clock_t init, end;
// generate the data
srand(time(NULL));
int timt = 0;
float3 *dataArray = new float3[cnt];
int *result = new int[cnt];
for (int i = 0; i < cnt; i++)
{
dataArray[i].x = i;
dataArray[i].y = i;
dataArray[i].z = i;
}
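	// note: the points lie on the diagonal (i, i, i), so each point's nearest neighbour
	// is an adjacent index (i-1 or i+1) -- handy for sanity-checking the printed results;
	// srand() above is never used because the test data is deterministic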
	// first, measure how long the algorithm takes on the CPU
for (int i = 0; i < 10; i++)
{
init = clock();
KNearestNeighborsCPU(dataArray, result, cnt);
end = clock();
timt += end - init;
cout << "Iteration number " << i << " took " << end - init << " milliseconds" << endl;
}
cout << "[+] The algorithm on the CPU takes " << timt / 10 << " milliseconds" << endl;
timt = 0;
for (int i = 0; i < 10; i++)
cout << i << " - " << result[i] << endl;
	memset(result, 0, cnt * sizeof(int));
// allocate and copy memory to the gpu
float3 *deviceData;
int *deviceResult;
if (cudaMalloc(&deviceData, sizeof(float3)*cnt) != cudaSuccess)
{
cout << "[+] Unable to allocate GPU memory" << endl;
return -1;
}
if (cudaMalloc(&deviceResult, sizeof(int)*cnt) != cudaSuccess)
{
cout << "[+] Unable to allocate GPU memory" << endl;
return -2;
}
if (cudaMemcpy(deviceData, dataArray, sizeof(float3)*cnt, cudaMemcpyHostToDevice) != cudaSuccess)
{
cout << "[+] Error in moving memory to the GPU" << endl;
return -3;
}
	// then measure the speed of the simple brute-force implementation on the GPU
for (int i = 0; i < 10; i++)
{
init = clock();
KNearestNeighborsGPU<<< (cnt / 128) + 1, 128 >>>(deviceData, deviceResult, cnt);
cudaMemcpy(result, deviceResult, sizeof(int)*cnt, cudaMemcpyDeviceToHost);
end = clock();
timt += end - init;
cout << "Iteration number " << i << " took " << end - init << " milliseconds" << endl;
}
cout << "[+] The algorithm on the GPU takes " << timt / 10 << " milliseconds" << endl;
timt = 0;
for (int i = 0; i < 10; i++)
cout << i << " - " << result[i] << endl;
return 0;
}
// cpu algorithm
void KNearestNeighborsCPU(float3 *dataArray, int *result, int cnt)
{
for (int i = 0; i < cnt; i++)
{
float minimumDist = 3.4028234664e38f, distance = 0;
for (int j = 0; j < cnt; j++)
{
if (i != j)
{
distance = (dataArray[i].x - dataArray[j].x) * (dataArray[i].x - dataArray[j].x);
distance += (dataArray[i].y - dataArray[j].y) * (dataArray[i].y - dataArray[j].y);
distance += (dataArray[i].z - dataArray[j].z) * (dataArray[i].z - dataArray[j].z);
if (distance < minimumDist)
{
minimumDist = distance;
result[i] = j;
}
}
}
}
}
// gpu algorithm
__global__ void KNearestNeighborsGPU(float3 *dataArray, int *result, int cnt)
{
int id = blockIdx.x * blockDim.x + threadIdx.x;
if (id >= cnt) return;
float3 point = dataArray[id], current;
float minimumDist = 3.4028234664e38f, distance = 0;
for (int j = 0; j < cnt; j++)
{
if (id == j) continue;
current = dataArray[j];
distance = (point.x - current.x) * (point.x - current.x);
distance += (point.y - current.y) * (point.y - current.y);
distance += (point.z - current.z) * (point.z - current.z);
if (distance < minimumDist)
{
minimumDist = distance;
result[id] = j;
}
}
} |
0a87c62f48b9a2458dcf76a68b7d6fff76726791.hip | // !!! This is a file automatically generated by hipify!!!
//############################################
//#
//#
//#
//#
//##############################################
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#define PI 3.141592653
#define EPS 0.000000001
#define BlockSize1 16// tile size in 1st-axis
#define BlockSize2 16// tile size in 2nd-axis
#define BlockSize 512
#define mm 4 // half of the order in space
#define npd 20	  // absorbing boundary condition width (number of padding layers)
__device__ float s, t, r;
//a#############################################################################################
__constant__ float stencil[mm+1]={-205.0/72.0,8.0/5.0,-1.0/5.0,8.0/315.0,-1.0/560.0};
//a#############################################################################################
__global__ void cuda_step_fd3d(float *p0, float *p1, float *VV, float _dz2, float _dx2, float _dy2, int n1, int n2, int n3,
float dt, float *pdt2, bool pdt)
/*< step forward: 3-D FD, order=8 >*/
{
bool validr = true;
bool validw = true;
const int gtid1 = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int gtid2 = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
const int ltid1 = threadIdx.x;//ithreadz
const int ltid2 = threadIdx.y;//ithreadx
const int work1 = blockDim.x;//nblockz
const int work2 = blockDim.y;//nblockx
__shared__ float tile[BlockSize2 + 2 * mm][BlockSize1 + 2 * mm];//tile[16+2*mm][16+2*mm]
const int stride2 = n1 + 2 * mm + 2 * npd;//n1=nz
const int stride3 = stride2 * (n2 + 2 * mm + 2 * npd);//n2=nx stride3=(nz+2*mm)*(nx+2*mm)
int inIndex = 0;
int outIndex = 0;
// Advance inputIndex to start of inner volume
inIndex += (mm ) * stride2 + mm ;// inIndex=mm*(nz+2*mm+2*npd)+mm;
// Advance inputIndex to target element
inIndex += gtid2 * stride2 + gtid1; // inIndex=mm*(nz+2*mm)+mm+ix*(nz+2*mm+2*npd)+iz;:igrid
float infront[mm];
float behind[mm];
float current;
const int t1 = ltid1 + mm;
const int t2 = ltid2 + mm;
// Check in bounds
if ((gtid1 >= n1 + mm + 2*npd) ||(gtid2 >= n2 + mm + 2*npd)) validr = false;
if ((gtid1 >= n1 + 2*npd) ||(gtid2 >= n2 + 2*npd)) validw = false;
// Preload the "infront" and "behind" data
for (int i = mm -2 ; i >= 0 ; i--)//change 'mm-2' to 'mm-1'+++++++++++++++++++
{
if (validr) behind[i] = p1[inIndex];
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
}
if (validr) current = p1[inIndex];
outIndex = inIndex;
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
for (int i = 0 ; i < mm ; i++)
{
if (validr) infront[i] = p1[inIndex];
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
}
// Step through the zx-planes
for (int i3 = mm ; i3 < n3 + 2*npd + mm ; i3++)
{
// Advance the slice (move the thread-front)
for (int i = mm - 1 ; i > 0 ; i--) behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
for (int i = 0 ; i < mm - 1 ; i++) infront[i] = infront[i + 1];
if (validr) infront[mm - 1] = p1[inIndex];
inIndex += stride3;
outIndex += stride3;
__syncthreads();
// Update the data slice in the local tile
// Halo above & below
if (ltid2 < mm)
{
/* tile[ithread][ithread+mm]=p1[igrid - mm*(nz+2*mm)] */
tile[ltid2][t1] = p1[outIndex - mm * stride2];//t1 = ltid1 + mm;
tile[ltid2 + work2 + mm][t1] = p1[outIndex + work2 * stride2];
}
// Halo left & right
if (ltid1 < mm)
{
tile[t2][ltid1] = p1[outIndex - mm];
tile[t2][ltid1 + work1 + mm] = p1[outIndex + work1];
}
tile[t2][t1] = current;
__syncthreads();
// Compute the output value
float c1, c2, c3;
c1=c2=c3=stencil[0]*current;
for (int i=1; i <= mm ; i++)
{
c1 +=stencil[i]*(tile[t2][t1-i]+ tile[t2][t1+i]);//z
c2 +=stencil[i]*(tile[t2-i][t1]+ tile[t2+i][t1]);//x
c3 +=stencil[i]*(infront[i-1] + behind[i-1] ); //y
}
c1*=_dz2;
c2*=_dx2;
c3*=_dy2;
if (validw&&pdt) pdt2[outIndex]= (c1+c2+c3);
if (validw) p0[outIndex]=2.0*p1[outIndex]-p0[outIndex]+VV[outIndex]*VV[outIndex]*dt*dt*(c1+c2+c3);
}
}
//a#############################################################################################
void check_gpu_error (const char *msg)
/*< check GPU errors >*/
{
hipError_t err = hipGetLastError ();
if (hipSuccess != err) {
printf ("Cuda error: %s: %s", msg, hipGetErrorString (err));
exit(0);
}
}
//a#############################################################################################
void window3d(float *a, float *b, int n1, int n2, int n3)
/*< window a 3d subvolume >*/
{
int i1, i2, i3, nn1, nn2;
nn1=n1+2*mm+ 2*npd;//z
nn2=n2+2*mm+ 2*npd;//x
for(i3=0; i3<n3; i3++)
for(i2=0; i2<n2; i2++)
for(i1=0; i1<n1; i1++)
{
a[i1+n1*i2+n1*n2*i3]=b[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)];
}
}
//a#############################################################################################
void velocity_transform(float *v, float*vv, float dt, int n1, int n2, int n3)
/*< velocity transform: pad v into vv with mm+npd ghost/absorbing cells on every face (dt is unused here) >*/
{
int i1, i2, i3, nn1, nn2, nn3;
float tmp;
nn1=n1+2*mm+2*npd;
nn2=n2+2*mm+2*npd;
nn3=n3+2*mm+2*npd;
// inner zone
for(i3=0; i3<n3; i3++){//y
for(i2=0; i2<n2; i2++){//x
for(i1=0; i1<n1; i1++){//z
tmp=v[i1+n1*i2+n1*n2*i3];
vv[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]=tmp;
}
}
}
//top & down
for(i3=0; i3<nn3; i3++){//y
for(i2=0; i2<nn2; i2++){//x
for (i1=0; i1<mm+npd; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[mm+npd+nn1*i2+nn1*nn2*i3];
vv[(nn1-i1-1)+nn1*i2+nn1*nn2*i3]=vv[(nn1-mm-npd-1)+nn1*i2+nn1*nn2*i3];
}
}
}
//left & right
for(i3=0; i3<nn3; i3++){//y
for(i2=0; i2<mm+npd; i2++){//x
for (i1=0; i1<nn1; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*(mm+npd)+nn1*nn2*i3];
vv[i1+nn1*(nn2-i2-1)+nn1*nn2*i3]=vv[i1+nn1*(nn2-mm-npd-1)+nn1*nn2*i3];
}
}
}
//front & back
for(i3=0; i3<mm+npd; i3++){//y
for(i2=0; i2<nn2; i2++){//x
for(i1=0; i1<nn1; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*i2+nn1*nn2*(mm+npd)];
vv[i1+nn1*i2+nn1*nn2*(nn3-1-i3)]=vv[i1+nn1*i2+nn1*nn2*(nn3-mm-npd-1)];
}
}
}
}
//a#############################################################################################
__global__ void cuda_add_source(bool add, float *p, float *source, int *szxy, int ns)
/*< add/subtract sources: length of source[]=ns, index stored in szxy[] >*/
{
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(id<ns){
if(add){
p[szxy[id]]+=source[id];
}else{
p[szxy[id]]-=source[id];
}
}
}
//a#############################################################################################
__global__ void cuda_record(float *P, float *seis, int *gxz, int ng, int it, int nt, bool record)//++++++++++++
/*< record the seismogram at time it >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if (id<ng)
{
if(record) seis[it+id*nt]=P[gxz[id]];
else P[gxz[id]]=seis[it+id*nt];
}
}
//a#############################################################################################
__global__ void cuda_cal_illum(float *s, float *p, int nz, int nx, int ny)
/*< calculate the source lighting matrix >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) s[id]+=p[id]*p[id];
}
}
//a#############################################################################################
__global__ void cuda_illum(float *g1, float *illum, int nz, int nx, int ny)
/*< normalize the gradient by the source illumination: g1 /= illum >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny&&illum[id]!=0) g1[id]/=illum[id];
}
}
//a#############################################################################################
__global__ void cuda_sum(float *ns, float *is, int nz, int nx, int ny)
/*< stack the per-shot array is[] into the total ns[] >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) ns[id]+=is[id];
}
}
//a#############################################################################################
__global__ void cuda_cal_g1(float *g1, float *s, float *g, int nz, int nx, int ny)
/*< accumulate the per-shot gradient: g1 += s * g (cross-correlation of the two wavefields) >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) g1[id]+=s[id]*g[id];
}
}
//a#############################################################################################
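/* Sponge-style absorbing boundary used by cuda_absorb_bndr: inside the npd-cell
 * pad every sample is multiplied each time step by
 *
 *   w(d) = 1 + qp * (d/npd)^2 ,   d = depth into the pad,
 *
 * and the kernel is launched with qp = -0.25, so w falls smoothly from 1 at the
 * inner edge of the pad to 0.75 at the outer edge, attenuating outgoing waves
 * step by step. */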
__global__ void cuda_absorb_bndr(float *P,float *Q,int nz,int nx,int ny,float qp)
/*< absorbing boundary condition >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
/*< front & back (0<y<ny) >*/
if ( iy < npd ){
P[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*Q[id];
}else if ( iy >= 2*mm + npd + ny ){
P[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*Q[id];
}
/*< left & right (0<x<nx) >*/
if ( ix < npd ){
P[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*Q[id];
}else if ( ix >= 2*mm + npd + nx ){
P[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*Q[id];
}
/*< up & down (0<z<nz) >*/
if ( iz < npd ){
P[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*Q[id];
}else if ( iz >= 2*mm + npd + nz ){
P[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*Q[id];
}
}
}
//a#############################################################################################
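/* Residual and misfit used by cuda_cal_residuals: com = cal - obs per trace
 * sample, and the scalar objective accumulated in *obj is the L2 misfit
 * sum(com^2) over all receivers and time samples of the shot. */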
__global__ void cuda_cal_residuals(float *obj, float *cal, float *obs, float *com, int nn, int nx, int ny, int nt)
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt){
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
            if (id<nn)
            {
                com[id]=cal[id] - obs[id];
                atomicAdd(obj, com[id]*com[id]);/* guard the read of com[id] and accumulate the misfit without a race on *obj */
            }
}
}
}
//a#############################################################################################
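/* Ricker source used by cuda_ricker_wavelet, delayed by 1/favg so the whole
 * waveform fits in the record:
 *
 *   tau  = PI * favg * (t - 1/favg)
 *   w(t) = (1 - 2*tau^2) * exp(-tau^2)
 *
 * Note that the pfac argument is accepted but not applied inside the kernel. */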
__global__ void cuda_ricker_wavelet(float *wlt, float favg, float dt, int nt, float pfac)
/*< generate ricker wavelet with time delay >*/
{
int it=threadIdx.x+blockDim.x*blockIdx.x;
if (it<nt){
float tmp = PI*favg*fabsf(it*dt-1.0/favg);//delay the wavelet to exhibit all waveform
tmp *=tmp;
wlt[it]= (1.0-2.0*tmp)*expf(-tmp);// ricker wavelet at time: t=nt*dt
}
}
//a#############################################################################################
__global__ void cuda_set_s(int *szxy, int fsz, int fsx, int fsy, int dsz, int dsx, int dsy, int ns, int nsx, int nz, int nx, int ny)
/*< set the positions of sources in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int ixs=id%nsx;
int iys=id/nsx;
if (id<ns) szxy[id]=(fsz+mm+npd)+nnz*(fsx+ixs*dsx+mm+npd)+nnz*nnx*(fsy+iys*dsy+mm+npd);
}
//a#############################################################################################
__global__ void cuda_set_up_do(int *gzxy, int *up, int *down, int ng, int nz, int nx, int ny)
/*< set the positions of geophones & down in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int iy=id/nx;
int ix=id%nx;
if (id<ng){
gzxy[id]=(mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
up[id]=(mm+npd-1)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
down[id]=(nz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
}
}
//a#############################################################################################
__global__ void cuda_set_fr_ba(int *front, int *back, int ng, int nz, int nx, int ny)
/*< set the positions of front & back in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int ix=id/nz;
int iz=id%nz;
if (id<ng){
front[id]=(iz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(mm+npd-1);
back[id]=(iz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(ny+mm+npd);
}
}
//a#############################################################################################
__global__ void cuda_set_le_ri(int *left, int *right,int ng, int nz,int nx, int ny)
/*< set the positions of left & right in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int iy=id/nz;
int iz=id%nz;
if (id<ng){
left[id]=(iz+mm+npd)+nnz*(mm+npd-1)+nnz*nnx*(iy+mm+npd);
right[id]=(iz+mm+npd)+nnz*(nx+mm+npd)+nnz*nnx*(iy+mm+npd);
}
}
//a#############################################################################################
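/* cuda_save_bndr stores (write=true) or restores (write=false) the six faces of
 * the interior volume, 2*nz*nx + 2*nz*ny + 2*nx*ny samples per time step.  The
 * forward sweep saves them into s_bndr so that the backward sweep can rebuild
 * the source wavefield in reverse time without keeping full 3-D snapshots. */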
__global__ void cuda_save_bndr(float *bndr, float *p0, int *front, int *back, int *left, int *right, int *up, int *down,
int nz, int nx, int ny, bool write)//(2*nz*nx+2*nz*ny+nx*ny)
/*< write boundaries out or read them into wavefield variables p>*/
{
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(write){
if(id<nz*nx)
bndr[id]=p0[front[id]]; /* front boundary */
else if((id>=nz*nx)&&(id<2*nz*nx))
bndr[id]=p0[back[id-nz*nx]]; /* back boundary */
else if((id>=2*nz*nx)&&(id<(2*nz*nx+nz*ny)))
bndr[id]=p0[left[id-2*nz*nx]]; /* left boundary */
else if((id>=(2*nz*nx+nz*ny))&&(id<(2*nz*nx+2*nz*ny)))
bndr[id]=p0[right[id-2*nz*nx-nz*ny]]; /* right boundary */
else if((id>=(2*nz*nx+2*nz*ny))&&(id<(2*nz*nx+2*nz*ny+nx*ny)))
bndr[id]=p0[up[id-2*nz*nx-2*nz*ny]]; /* up boundary */
else if((id>=(2*nz*nx+2*nz*ny+nx*ny))&&(id<(2*nz*nx+2*nz*ny+2*nx*ny)))
bndr[id]=p0[down[id-2*nz*nx-2*nz*ny-nx*ny]];/* down boundary */
}else{
if(id<nz*nx)
p0[front[id]]=bndr[id]; /* front boundary */
else if((id>=nz*nx)&&(id<2*nz*nx))
p0[back[id-nz*nx]]=bndr[id]; /* back boundary */
else if((id>=2*nz*nx)&&(id<(2*nz*nx+nz*ny)))
p0[left[id-2*nz*nx]]=bndr[id]; /* left boundary */
else if((id>=(2*nz*nx+nz*ny))&&(id<(2*nz*nx+2*nz*ny)))
p0[right[id-2*nz*nx-nz*ny]]=bndr[id]; /* right boundary */
else if((id>=(2*nz*nx+2*nz*ny))&&(id<(2*nz*nx+2*nz*ny+nx*ny)))
p0[up[id-2*nz*nx-2*nz*ny]]=bndr[id]; /* up boundary */
else if((id>=(2*nz*nx+2*nz*ny+nx*ny))&&(id<(2*nz*nx+2*nz*ny+2*nx*ny)))
p0[down[id-2*nz*nx-2*nz*ny-nx*ny]]=bndr[id]; /* down boundary */
}
}
//a#############################################################################################
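/* Gradient preconditioning in cuda_scale_gradient:
 *
 *   g1 <- 2 * g1 / VV                        (precon == false)
 *   g1 <- 2 * g1 / ( VV * sqrt(illum+EPS) )  (precon == true)
 *
 * where illum is the accumulated sum of squared wavefield amplitudes. */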
__global__ void cuda_scale_gradient(float *g1, float *VV, float *illum, int nnx, int nny, int nnz, bool precon)
/*< scale g1 >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float a=VV[id];
if (precon) a*=sqrtf(illum[id]+EPS);/*precondition with residual wavefield illum*/
g1[id]*=2.0/a;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothz(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(iz+i>=0 && iz+i<nnz) s+=expf(-(2.0*i*i)/rbell)*g1[id+i];
g1[id]=s;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothx(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(ix+i>=0 && ix+i<nnx) s+=expf(-(2.0*i*i)/rbell)*g1[id+i*nnz];
g1[id]=s;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothy(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(iy+i>=0 && iy+i<nny) s+=expf(-(2.0*i*i)/rbell)*g1[id+i*nnz*nnx];
g1[id]=s;
}
}
}
//a#############################################################################################
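/* Conjugate-gradient beta, computed in two steps.  Step 1 accumulates over the
 * whole model
 *
 *   s = sum g1*(g1-g0)   (HS numerator)
 *   t = sum cg*(g1-g0)   (HS and DY denominator)
 *   r = sum g1*g1        (DY numerator)
 *
 * and step 2 forms the hybrid beta = max(0, min(s/t, r/t)) (Hestenes-Stiefel /
 * Dai-Yuan with restart).  Note that, as written, every thread resets and
 * accumulates the global __device__ variables s, t, r without atomics, so the
 * reduction in step 1 is unsynchronised. */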
__global__ void cuda_cal_beta_step1(float *g0, float *g1, float *cg, int nnx, int nny, int nnz)
/*< calculate beta for the nonlinear conjugate gradient algorithm, step 1:
    accumulate s = sum g1*(g1-g0), t = sum cg*(g1-g0), r = sum g1*g1 over the model
    (launched with the same 2-D model grid as the other kernels) >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
s=0.0,t=0.0,r=0.0;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float a=g0[id];
float b=g1[id];
float c=cg[id];
/* HS: Hestenses-Stiefel NLCG algorithm */
s += b*(b-a); // numerator of HS
t += c*(b-a); // denominator of HS,DY
r += b*b; // numerator of DY
}
}
}
//a#############################################################################################
__global__ void cuda_cal_beta_step2(float *beta, int nnx, int nny, int nnz)
/*< step 2: combine s, t, r into the hybrid HS-DY beta = max(0, min(s/t, r/t)) >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<1)
{
float beta_HS=0.0;
float beta_DY=0.0;
if(t!=0)
{
beta_HS=s/t;
beta_DY=r/t;
}
*beta=max(0.0, min(beta_HS, beta_DY));/* Hybrid HS-DY method combined with iteration restart */
}
}
//a#############################################################################################
__global__ void cuda_cal_conjgrad(float *g1, float *cg, float beta, int nnx, int nny, int nnz)
/*< calculate nonlinear conjugate gradient >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
cg[id] = -g1[id]+beta*cg[id];
}
}
}
//a#############################################################################################
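/* Trial step length (Tarantola): epsil = 0.01 * max|VV| / max|cg|, obtained with
 * a single-block shared-memory max-reduction, hence the <<<1, BlockSize>>>
 * launch requirement stated below. */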
__global__ void cuda_cal_epsilon(float *VV, float *cg, float *epsil, int N)
/*< calculate estimated stepsize (epsil) according to Taratola's method
configuration requirement: <<<1, Block_Size>>> >*/
{
__shared__ float sdata[BlockSize];/* find max(|vv(:)|) */
__shared__ float tdata[BlockSize];/* find max(|cg(:)|) */
int tid = threadIdx.x;
sdata[tid] = 0.0f;
tdata[tid] = 0.0f;
for(int s=0; s<(N+BlockSize-1)/BlockSize; s++)
{
int id=s*blockDim.x+threadIdx.x;
float a=(id<N)?fabsf(VV[id]):0.0f;
float b=(id<N)?fabsf(cg[id]):0.0f;
sdata[tid]= max(sdata[tid], a);
tdata[tid]= max(tdata[tid], b);
}
__syncthreads();
/* do reduction in shared mem */
for(int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s) {sdata[tid]=max(sdata[tid], sdata[tid+s]);tdata[tid]=max(tdata[tid], tdata[tid+s]);}
__syncthreads();
}
if (tid < 32)
{
if (blockDim.x >= 64) { sdata[tid] =max(sdata[tid],sdata[tid + 32]);tdata[tid]=max(tdata[tid], tdata[tid+32]);}
if (blockDim.x >= 32) { sdata[tid] =max(sdata[tid],sdata[tid + 16]);tdata[tid]=max(tdata[tid], tdata[tid+16]);}
if (blockDim.x >= 16) { sdata[tid] =max(sdata[tid],sdata[tid + 8]);tdata[tid]=max(tdata[tid], tdata[tid+8]);}
if (blockDim.x >= 8) { sdata[tid] =max(sdata[tid],sdata[tid + 4]);tdata[tid]=max(tdata[tid], tdata[tid+4]);}
if (blockDim.x >= 4) { sdata[tid] =max(sdata[tid],sdata[tid + 2]);tdata[tid]=max(tdata[tid], tdata[tid+2]);}
if (blockDim.x >= 2) { sdata[tid] =max(sdata[tid],sdata[tid + 1]);tdata[tid]=max(tdata[tid], tdata[tid+1]);}
}
if (tid == 0) { if(tdata[0]>EPS) *epsil=0.01*sdata[0]/tdata[0]; else *epsil=0.0;}
}
//a#############################################################################################
__global__ void cuda_com2derr(float *com, float *derr, int nx, int ny, int nt)
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt){
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
if (id<nx*ny*nt) derr[id]=com[id];
}
}
}
//a#############################################################################################
__global__ void cuda_cal_vtmp(float *VVtmp, float *VV, float *cg, float epsil, int nnx, int nny, int nnz)
/*< calculate temporary velocity >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
VVtmp[id] =VV[id] + epsil*cg[id];
}
}
}
//a#############################################################################################
__global__ void cuda_sum_alpha12(float *alpha1, float *alpha2, float *cal, float *obs, float *derr,
int nx, int ny, int nz, int nt)
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt)
{
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
if (id<nx*ny*nt)
{
float c=derr[id];
float a=obs[id]+c;/* since f(mk)-dobs[id]=derr[id], thus f(mk)=b+c; */
float b=cal[id]-a;/* f(mk+epsil*cg)-f(mk) */
                atomicAdd(&alpha1[ix+nx*iy], -b*c);/* several it-threads share the same (ix,iy) cell, so accumulate atomically */
                atomicAdd(&alpha2[ix+nx*iy],  b*b);
}
}
}
}
//a#############################################################################################
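/* Step length: with b = f(m + epsil*cg) - f(m) and c = f(m) - d_obs per trace
 * sample (accumulated by cuda_sum_alpha12 into alpha1 = -sum b*c and
 * alpha2 = sum b*b), the final update step is
 *
 *   alpha = epsil * sum(alpha1) / ( sum(alpha2) + EPS ) ,
 *
 * again reduced inside a single block (<<<1, BlockSize>>>). */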
__global__ void cuda_cal_alpha(float *alpha, float *alpha1, float *alpha2, float epsil, int ng)
/*< calculate searched stepsize (alpha) according to Taratola's method
configuration requirement: <<<1, Block_Size>>> >*/
{
__shared__ float sdata[BlockSize];
__shared__ float tdata[BlockSize];
int tid=threadIdx.x;
sdata[tid]=0.0f;
tdata[tid]=0.0f;
for(int s=0; s<(ng+BlockSize-1)/BlockSize; s++)
{
int id=s*blockDim.x+threadIdx.x;
float a=(id<ng)?alpha1[id]:0.0f;
float b=(id<ng)?alpha2[id]:0.0f;
sdata[tid] +=a;
tdata[tid] +=b;
}
__syncthreads();
/* do reduction in shared mem */
for(int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s) { sdata[tid] += sdata[tid + s];tdata[tid] += tdata[tid + s]; } __syncthreads();
}
if (tid < 32)
{
if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
if (tid == 0)
{
if(tdata[0]>EPS) *alpha=epsil*sdata[0]/(tdata[0]+EPS);
else *alpha=0.0;
}
}
//a#############################################################################################
__global__ void cuda_update_vel(float *VV, float *cg, float alpha, int nnx, int nny, int nnz)
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz) VV[id]=VV[id]+alpha*cg[id];
}
}
//a#############################################################################################
//a### ###
//a### Main Function ###
//a### ###
//a#############################################################################################
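/* Per-iteration FWI workflow implemented in main():
 *   1. forward-model every shot, record p_cal and save the boundary faces;
 *   2. form the residual com = cal - obs and accumulate the L2 objective;
 *   3. rebuild the source wavefield backwards from the stored boundaries while
 *      back-propagating the residual, and correlate the two wavefields
 *      (cuda_cal_g1) to build the gradient g1 and the illumination;
 *   4. precondition and smooth g1, then form the conjugate direction cg with
 *      the hybrid HS-DY beta;
 *   5. estimate epsil, re-model shot 0 with the trial velocity VV + epsil*cg and
 *      derive the step alpha;
 *   6. update the velocity and write the intermediate model, gradient,
 *      illumination and objective value to disk. */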
int main(int argc, char* argv[])
{
int nz, nx, ny, nnz, nnx, nny, ns, nsx, nt, it, is, fsz, fsx, fsy, dsz, dsx, dsy, ng, iter, niter;
int *coo_source, *coo_receivers, *coo_up, *coo_down, *coo_front, *coo_back, *coo_left, *coo_right;
float dz, dx, dy, favg, dt, _dz2, _dx2, _dy2, pfac;
float *v, *vv, *wavelet, *VV, *VVtmp, *s_P0, *s_P1, *ptr, *g_P0, *g_P1, *s_Ptt;
float *p_cal, *p_IO, *p_obs, *p_com, *s_bndr, *p_derr;
float *g0, *g1, *cg, *illum, *pars;
float obj1, obj, beta, epsil, alpha, *alpha1, *alpha2, *objval;
//a######################################
char FNvel[250]={"vel201202203initial.dat"};
char FNsobs[250]={"shot_obs.dat"};
char FNscal[250]={"shot_cal.dat"};
char FNscom[250]={"shot_com.dat"};
char FNgrad[250]={"gradient.dat"};
char FNillum[250]={"illum.dat"};
char FNupdatevel[250]={"velupdate.dat"};
char FNlastvel[250]={"vellastIter.dat"};
char FNobjs[250]={"objections.txt"};
//a######################################
nx=201; dx=10;
ny=1; dy=10;
nz=203; dz=10;
nt=1501; favg=20; pfac=100;
dt=0.001;
ns=5; nsx=5;
fsx=10; dsx=40;
fsy=1; dsy=0;
fsz=1; dsz=0;
niter=50;
//a######################################
FILE *fpvel, *fpscal, *fpsobs, *fpgrad, *fpscom, *fpillum, *fpupdatevel, *fplastvel, *fpobjs;
if((fpvel=fopen(FNvel,"rb"))==NULL){printf("### < %s > read error!\n",FNvel);exit(0);}
if((fpsobs=fopen(FNsobs,"rb"))==NULL){printf("### < %s > read error!\n",FNsobs);exit(0);}
fpscal=fopen(FNscal,"wb");
fpscom=fopen(FNscom,"wb");
fpgrad=fopen(FNgrad,"wb");
fpillum=fopen(FNillum,"wb");
fpupdatevel=fopen(FNupdatevel,"wb");
fplastvel=fopen(FNlastvel,"wb");
fpobjs=fopen(FNobjs,"w");
//a######################################
_dz2=1.0/(dz*dz);
_dx2=1.0/(dx*dx);
_dy2=1.0/(dy*dy);
nnz=nz+2*mm+2*npd;
nnx=nx+2*mm+2*npd;
nny=ny+2*mm+2*npd;
ng=nx*ny;
//a######################################
v=(float*)malloc(nz*nx*ny*sizeof(float));
vv=(float*)malloc(nnz*nnx*nny*sizeof(float));
p_IO=(float*)malloc(ng*nt*sizeof(float));
objval=(float*)malloc(niter*sizeof(float));
memset(p_IO, 0, ng*nt*sizeof(float));
memset(objval, 0, niter*sizeof(float));
fread(v, sizeof(float), nz*nx*ny, fpvel);
velocity_transform(v, vv, dt, nz, nx, ny);
/*< initialize device, default device=0 >*/
hipSetDevice(0);
check_gpu_error("Failed to initialize device!");
dim3 dimg, dimb, dimt;
dimg.x=(nz+2*npd+2*mm+BlockSize1-1)/BlockSize1;
dimg.y=(nx+2*npd+2*mm+BlockSize2-1)/BlockSize2;
dimt.x=(nt+BlockSize1-1)/BlockSize1;
dimt.y=(nx+BlockSize2-1)/BlockSize2;
dimb.x=BlockSize1;
dimb.y=BlockSize2;
/* allocate memory on device */
/*< wavelet & velocity >*/
hipMalloc(&wavelet, nt*sizeof(float));
hipMalloc(&VV, nnz*nnx*nny*sizeof(float));
hipMalloc(&VVtmp, nnz*nnx*nny*sizeof(float));
/*< forward & backward & receivers wavefield >*/
hipMalloc(&s_P0, nnz*nnx*nny*sizeof(float));
hipMalloc(&s_P1, nnz*nnx*nny*sizeof(float));
hipMalloc(&g_P0, nnz*nnx*nny*sizeof(float));
hipMalloc(&g_P1, nnz*nnx*nny*sizeof(float));
hipMalloc(&s_Ptt, nnz*nnx*nny*sizeof(float));
/*< shot & receivers location >*/
hipMalloc(&coo_source, ns*sizeof(int));
hipMalloc(&coo_receivers, ng*sizeof(int));
/*< boundary location >*/
hipMalloc(&coo_up , nx*ny*sizeof(int));
hipMalloc(&coo_down , nx*ny*sizeof(int));
hipMalloc(&coo_front, nx*nz*sizeof(int));
hipMalloc(&coo_back , nx*nz*sizeof(int));
hipMalloc(&coo_left , ny*nz*sizeof(int));
hipMalloc(&coo_right, ny*nz*sizeof(int));
/*< calculated/synthetic seismic data (it & nt & 6's boundary) >*/
hipMalloc(&p_cal, ng*nt*sizeof(float));
hipMalloc(&p_obs, ng*nt*sizeof(float));
hipMalloc(&p_com, ng*nt*sizeof(float));
hipMalloc(&p_derr, ng*nt*sizeof(float));
hipMalloc(&alpha1, ng*sizeof(float));
hipMalloc(&alpha2, ng*sizeof(float));
hipMalloc(&s_bndr, nt*(2*nz*nx+2*nz*ny+2*nx*ny)*sizeof(float));
/*< The is & ns gradient ,lighting matrix >*/
hipMalloc(&g0, nnz*nnx*nny*sizeof(float));
hipMalloc(&g1, nnz*nnx*nny*sizeof(float));
hipMalloc(&cg, nnz*nnx*nny*sizeof(float));
hipMalloc(&illum, nnz*nnx*nny*sizeof(float));
hipMemset(g1, 0, nnz*nnx*nny*sizeof(float));
hipMemset(cg, 0, nnz*nnx*nny*sizeof(float));
/* d_pars[0]: obj; d_pars[1]: beta; d_pars[2]: epsilon; d_pars[3]: alpha; */
hipMalloc(&pars, 4*sizeof(float));
hipMemset(pars, 0, 4*sizeof(float));
check_gpu_error("Failed to allocate memory for variables!");
hipLaunchKernelGGL(( cuda_ricker_wavelet), dim3((nt+BlockSize-1)/BlockSize), dim3(BlockSize), 0, 0, wavelet, favg, dt, nt, pfac);
hipMemcpy(VV, vv, nnz*nnx*nny*sizeof(float), hipMemcpyHostToDevice);
/*< shot location >*/
hipLaunchKernelGGL(( cuda_set_s), dim3(1), dim3(ns), 0, 0, coo_source, fsz, fsx, fsy, dsz, dsx, dsy, ns, nsx, nz, nx, ny);
/*< receivers(up),down,front,back,left,right location >*/
hipLaunchKernelGGL(( cuda_set_up_do), dim3((nx*ny+BlockSize-1)/BlockSize),dim3(BlockSize), 0, 0, coo_receivers, coo_up,coo_down, nx*ny, nz, nx, ny);
hipLaunchKernelGGL(( cuda_set_fr_ba), dim3((nz*nx+BlockSize-1)/BlockSize),dim3(BlockSize), 0, 0, coo_front, coo_back, nz*nx, nz, nx, ny);
hipLaunchKernelGGL(( cuda_set_le_ri), dim3((nz*ny+BlockSize-1)/BlockSize),dim3(BlockSize), 0, 0, coo_left, coo_right, nz*ny, nz, nx, ny);
clock_t iter_t0, iter_t1, is_t0, is_t1, ns_t0, ns_t1;
printf("##########################################\n");
printf("###\n");
for(iter=0; iter<niter; iter++)
{
iter_t0=clock();
printf("########## Iter =%3d ########## \n###\n",iter+1);
hipMemcpy(g0, g1, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToDevice);
hipMemset(g1, 0, nnz*nnx*nny*sizeof(float));
hipMemset(illum, 0, nnz*nnx*nny*sizeof(float));
hipMemset(p_derr, 0, ng*nt*sizeof(float));
hipMemset(alpha1, 0, ng*sizeof(float));
hipMemset(alpha2, 0, ng*sizeof(float));
hipMemset(pars, 0, 4*sizeof(float));
rewind(fpscal);
rewind(fpsobs);
rewind(fpscom);
rewind(fpillum);
rewind(fpgrad);
ns_t0=clock();
for(is=0; is<ns; is++)
{
is_t0=clock();
hipMemset(s_P0, 0, nnz*nnx*nny*sizeof(float));
hipMemset(s_P1, 0, nnz*nnx*nny*sizeof(float));
hipMemset(g_P0, 0, nnz*nnx*nny*sizeof(float));
hipMemset(g_P1, 0, nnz*nnx*nny*sizeof(float));
hipMemset(s_Ptt, 0, nnz*nnx*nny*sizeof(float));
hipMemset(p_cal, 0, ng*nt*sizeof(float));
hipMemset(p_obs, 0, ng*nt*sizeof(float));
hipMemset(p_com, 0, ng*nt*sizeof(float));
hipMemset(s_bndr, 0, nt*(2*nz*nx+2*nz*ny+2*nx*ny)*sizeof(float));
for(it=0; it<nt; it++)
{
//if(it%400==0) printf("For: is=%2d, it=%d\n",is,it);
hipLaunchKernelGGL(( cuda_add_source), dim3(1),dim3(1), 0, 0, true, s_P1, &wavelet[it], &coo_source[is], 1);
hipLaunchKernelGGL(( cuda_step_fd3d), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, nz, nx, ny, -0.25);
hipLaunchKernelGGL(( cuda_save_bndr), dim3(((2*nz*nx+2*nz*ny+2*nx*ny)+BlockSize-1)/BlockSize),dim3(BlockSize), 0, 0,
&s_bndr[it*(2*nz*nx+2*nz*ny+2*nx*ny)],
s_P0, coo_front, coo_back, coo_left, coo_right, coo_up, coo_down,
nz, nx, ny, true);
hipLaunchKernelGGL(( cuda_cal_illum), dim3(dimg),dim3(dimb), 0, 0, illum, s_P0, nz, nx, ny);
hipLaunchKernelGGL(( cuda_record), dim3((ng+BlockSize-1)/BlockSize), dim3(BlockSize), 0, 0, s_P0, p_cal, coo_receivers, ng, it, nt, true);
}//it loop end
hipMemcpy(p_IO, p_cal, ng*nt*sizeof(float), hipMemcpyDeviceToHost);
fwrite(p_IO, sizeof(float), ng*nt, fpscal);
fseek(fpsobs,is*ng*nt*sizeof(float),0);
fread(p_IO, sizeof(float), ng*nt, fpsobs);
hipMemcpy(p_obs, p_IO, ng*nt*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_cal_residuals), dim3(dimt), dim3(dimb), 0, 0, &pars[0], p_cal, p_obs, p_com, ng*nt, nx, ny, nt);
           if(is==0) hipLaunchKernelGGL(( cuda_com2derr), dim3(dimt), dim3(dimb), 0, 0, p_com, p_derr, nx, ny, nt);
hipMemcpy(&obj, &pars[0], sizeof(float), hipMemcpyDeviceToHost);
if(is==(ns/2+1)){ hipMemcpy(p_IO, p_com, ng*nt*sizeof(float), hipMemcpyDeviceToHost);
fseek(fpscom,is*ng*nt*sizeof(float),0);
fwrite(p_IO, sizeof(float), ng*nt, fpscom); }
for(it=nt-1; it>-1; it--)
{
//if(it%400==0) printf("Back: is=%2d, it=%d\n",is,it);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
hipLaunchKernelGGL(( cuda_save_bndr), dim3(((2*nz*nx+2*nz*ny+2*nx*ny)+BlockSize-1)/BlockSize),dim3(BlockSize), 0, 0,
&s_bndr[it*(2*nz*nx+2*nz*ny+2*nx*ny)],
s_P1, coo_front, coo_back, coo_left, coo_right, coo_up, coo_down,
nz, nx, ny, false);
hipLaunchKernelGGL(( cuda_step_fd3d), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, s_Ptt, true);
hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, nz, nx, ny, -0.25);
hipLaunchKernelGGL(( cuda_record), dim3((ng+BlockSize-1)/BlockSize), dim3(BlockSize), 0, 0, g_P1, p_com, coo_receivers, ng, it, nt, false);
hipLaunchKernelGGL(( cuda_step_fd3d), dim3(dimg),dim3(dimb), 0, 0, g_P0, g_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=g_P0; g_P0=g_P1; g_P1=ptr;
hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, g_P0, g_P1, nz, nx, ny, -0.25);
hipLaunchKernelGGL(( cuda_cal_illum), dim3(dimg),dim3(dimb), 0, 0, illum, g_P1, nz, nx, ny);
hipLaunchKernelGGL(( cuda_cal_g1), dim3(dimg),dim3(dimb), 0, 0, g1, s_Ptt, g_P1, nz, nx, ny);
}// it loop end
is_t1=clock();
printf("### IS:(%2d) %.2f(min);\n",is,((float)(is_t1-is_t0))/60000000.0);
}//IS loop end
ns_t1=clock();
printf("### Cal gradient: %.2f (min)\n",((float)(ns_t1-ns_t0))/60000000.0);
hipMemcpy(vv, illum, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float), nz*nx*ny, fpillum);
/* compute the gradient of FWI by scaling, precondition incorporated here */
hipLaunchKernelGGL(( cuda_scale_gradient), dim3(dimg),dim3(dimb), 0, 0, g1, VV, illum, nnx, nny, nnz, true);
/* Gaussian smoothing for the sharp gradient */
hipLaunchKernelGGL(( cuda_bell_smoothz), dim3(dimg),dim3(dimb), 0, 0, g1, 2, nnx, nny, nnz);
hipLaunchKernelGGL(( cuda_bell_smoothx), dim3(dimg),dim3(dimb), 0, 0, g1, 2, nnx, nny, nnz);
hipLaunchKernelGGL(( cuda_bell_smoothy), dim3(dimg),dim3(dimb), 0, 0, g1, 2, nnx, nny, nnz);
/* calculate the factor beta in conjugate gradient method */
if (iter>0)
{
hipLaunchKernelGGL(( cuda_cal_beta_step1), dim3(dimg),dim3(dimb), 0, 0, g0, g1, cg, nnx, nny, nnz);
hipLaunchKernelGGL(( cuda_cal_beta_step2), dim3(1),dim3(1), 0, 0, &pars[1], nnx, nny, nnz);
}
hipMemcpy(&beta, &pars[1], sizeof(float), hipMemcpyDeviceToHost);
/* compute the conjugate gradient */
hipLaunchKernelGGL(( cuda_cal_conjgrad), dim3(dimg),dim3(dimb), 0, 0, g1, cg, beta, nnx, nny, nnz);
hipMemcpy(vv, cg, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float), nz*nx*ny, fpgrad);
/* estimate epsilon according to equation 11 */
hipLaunchKernelGGL(( cuda_cal_epsilon), dim3(1), dim3(BlockSize), 0, 0, VV, cg, &pars[2], nnx*nnz*nny);
hipMemcpy(&epsil, &pars[2], sizeof(float), hipMemcpyDeviceToHost);
/* obtain a tentative velocity model to estimate a good stepsize alpha */
hipLaunchKernelGGL(( cuda_cal_vtmp), dim3(dimg),dim3(dimb), 0, 0, VVtmp, VV, cg, epsil, nnx, nny, nnz);
ns_t0=clock();
printf("### Cal alpha:");
for(is=0; is<1; is++)
{
hipMemset(s_P0, 0, nnz*nnx*nny*sizeof(float));
hipMemset(s_P1, 0, nnz*nnx*nny*sizeof(float));
hipMemset(p_cal, 0, ng*nt*sizeof(float));
fseek(fpsobs,is*ng*nt*sizeof(float),0);
fread(p_IO, sizeof(float), ng*nt, fpsobs);
hipMemcpy(p_obs, p_IO, ng*nt*sizeof(float), hipMemcpyHostToDevice);
for(it=0; it<nt; it++)
{
hipLaunchKernelGGL(( cuda_add_source), dim3(1),dim3(1), 0, 0, true, s_P1, &wavelet[it], &coo_source[is], 1);
hipLaunchKernelGGL(( cuda_step_fd3d), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, VVtmp, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
hipLaunchKernelGGL(( cuda_absorb_bndr), dim3(dimg),dim3(dimb), 0, 0, s_P0, s_P1, nz, nx, ny, -0.25);
hipLaunchKernelGGL(( cuda_record), dim3((ng+BlockSize-1)/BlockSize), dim3(BlockSize), 0, 0, s_P0, p_cal, coo_receivers, ng, it, nt, true);
}//it loop end
hipLaunchKernelGGL(( cuda_sum_alpha12), dim3(dimt), dim3(dimb), 0, 0, alpha1, alpha2, p_cal, p_obs, p_derr, nx, ny, nz, nt);
}//is loop end
hipLaunchKernelGGL(( cuda_cal_alpha), dim3(1),dim3(BlockSize), 0, 0, &pars[3], alpha1, alpha2, epsil, ng);
hipMemcpy(&alpha, &pars[3], sizeof(float), hipMemcpyDeviceToHost);
ns_t1=clock();printf(" %.2f (min)\n",((float)(ns_t1-ns_t0))/60000000.0);
/* update the velocity model according to previous velocity, conjugate gradient and estimated stepsize */
hipLaunchKernelGGL(( cuda_update_vel), dim3(dimg),dim3(dimb), 0, 0, VV, cg, alpha, nnx, nny, nnz);
hipMemcpy(vv, VV, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float),nz*nx*ny, fpupdatevel);
/* compute the normalized objective function */
if(iter==0) {obj1=obj; objval[iter]=1.0;}
else objval[iter]=obj/obj1;
iter_t1=clock();
printf("### objval=%f, beta=%f, epsil=%.2f, alpha=%.2f : %.2f(min)\n",
objval[iter],beta,epsil,alpha,((float)(iter_t1-iter_t0))/60000000.0);
fprintf(fpobjs,"iter=%3d, obj=%f;\n",iter+1,objval[iter]);
hipMemcpy(vv, VV, nnz*nnx*nny*sizeof(float), hipMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
rewind(fplastvel);
fwrite(v, sizeof(float),nz*nx*ny, fplastvel);
}//iter loop end
printf("##################################\n");
/* free memory on device */
hipFree(wavelet);
hipFree(VV);
hipFree(VVtmp);
/*< wavefield(x-y-z) >*/
hipFree(s_P0);
hipFree(s_P1);
hipFree(g_P0);
hipFree(g_P1);
hipFree(s_Ptt);
/*< location >*/
hipFree(coo_source);
hipFree(coo_receivers);
hipFree(coo_front);
hipFree(coo_back);
hipFree(coo_left);
hipFree(coo_right);
hipFree(coo_down);
hipFree(coo_up);
/*< gradient >*/
hipFree(g0);
hipFree(g1);
hipFree(cg);
hipFree(illum);
hipFree(pars);
/*< wavefield(t-x-y-z) >*/
hipFree(p_cal);
hipFree(p_obs);
hipFree(p_com);
hipFree(p_derr);
hipFree(alpha1);
hipFree(alpha2);
hipFree(s_bndr);
/*< free alloc >*/
free(v);
free(vv);
free(p_IO);
free(objval);
fclose(fpvel);
fclose(fpscal);
fclose(fpsobs);
fclose(fpscom);
fclose(fpgrad);
fclose(fpillum);
fclose(fpupdatevel);
fclose(fplastvel);
fclose(fpobjs);
exit (0);
}
| 0a87c62f48b9a2458dcf76a68b7d6fff76726791.cu | //############################################
//#
//#
//#
//#
//##############################################
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#define PI 3.141592653
#define EPS 0.000000001
#define BlockSize1 16// tile size in 1st-axis
#define BlockSize2 16// tile size in 2nd-axis
#define BlockSize 512
#define mm 4 // half of the order in space
#define npd 20 // absorbing boundary layer width (number of padding cells)
__device__ float s, t, r;
//a#############################################################################################
__constant__ float stencil[mm+1]={-205.0/72.0,8.0/5.0,-1.0/5.0,8.0/315.0,-1.0/560.0};
//a#############################################################################################
__global__ void cuda_step_fd3d(float *p0, float *p1, float *VV, float _dz2, float _dx2, float _dy2, int n1, int n2, int n3,
float dt, float *pdt2, bool pdt)
/*< step forward: 3-D FD, order=8 >*/
{
bool validr = true;
bool validw = true;
const int gtid1 = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int gtid2 = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
const int ltid1 = threadIdx.x;//ithreadz
const int ltid2 = threadIdx.y;//ithreadx
const int work1 = blockDim.x;//nblockz
const int work2 = blockDim.y;//nblockx
__shared__ float tile[BlockSize2 + 2 * mm][BlockSize1 + 2 * mm];//tile[16+2*mm][16+2*mm]
const int stride2 = n1 + 2 * mm + 2 * npd;//n1=nz
const int stride3 = stride2 * (n2 + 2 * mm + 2 * npd);//n2=nx stride3=(nz+2*mm)*(nx+2*mm)
int inIndex = 0;
int outIndex = 0;
// Advance inputIndex to start of inner volume
inIndex += (mm ) * stride2 + mm ;// inIndex=mm*(nz+2*mm+2*npd)+mm;
// Advance inputIndex to target element
inIndex += gtid2 * stride2 + gtid1; // inIndex=mm*(nz+2*mm)+mm+ix*(nz+2*mm+2*npd)+iz;:igrid
float infront[mm];
float behind[mm];
float current;
const int t1 = ltid1 + mm;
const int t2 = ltid2 + mm;
// Check in bounds
if ((gtid1 >= n1 + mm + 2*npd) ||(gtid2 >= n2 + mm + 2*npd)) validr = false;
if ((gtid1 >= n1 + 2*npd) ||(gtid2 >= n2 + 2*npd)) validw = false;
// Preload the "infront" and "behind" data
for (int i = mm -2 ; i >= 0 ; i--)//change 'mm-2' to 'mm-1'+++++++++++++++++++
{
if (validr) behind[i] = p1[inIndex];
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
}
if (validr) current = p1[inIndex];
outIndex = inIndex;
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
for (int i = 0 ; i < mm ; i++)
{
if (validr) infront[i] = p1[inIndex];
inIndex += stride3;//stride3=(nz+2*mm)*(nx+2*mm)
}
// Step through the zx-planes
for (int i3 = mm ; i3 < n3 + 2*npd + mm ; i3++)
{
// Advance the slice (move the thread-front)
for (int i = mm - 1 ; i > 0 ; i--) behind[i] = behind[i - 1];
behind[0] = current;
current = infront[0];
for (int i = 0 ; i < mm - 1 ; i++) infront[i] = infront[i + 1];
if (validr) infront[mm - 1] = p1[inIndex];
inIndex += stride3;
outIndex += stride3;
__syncthreads();
// Update the data slice in the local tile
// Halo above & below
if (ltid2 < mm)
{
/* tile[ithread][ithread+mm]=p1[igrid - mm*(nz+2*mm)] */
tile[ltid2][t1] = p1[outIndex - mm * stride2];//t1 = ltid1 + mm;
tile[ltid2 + work2 + mm][t1] = p1[outIndex + work2 * stride2];
}
// Halo left & right
if (ltid1 < mm)
{
tile[t2][ltid1] = p1[outIndex - mm];
tile[t2][ltid1 + work1 + mm] = p1[outIndex + work1];
}
tile[t2][t1] = current;
__syncthreads();
// Compute the output value
float c1, c2, c3;
c1=c2=c3=stencil[0]*current;
for (int i=1; i <= mm ; i++)
{
c1 +=stencil[i]*(tile[t2][t1-i]+ tile[t2][t1+i]);//z
c2 +=stencil[i]*(tile[t2-i][t1]+ tile[t2+i][t1]);//x
c3 +=stencil[i]*(infront[i-1] + behind[i-1] ); //y
}
c1*=_dz2;
c2*=_dx2;
c3*=_dy2;
if (validw&&pdt) pdt2[outIndex]= (c1+c2+c3);
if (validw) p0[outIndex]=2.0*p1[outIndex]-p0[outIndex]+VV[outIndex]*VV[outIndex]*dt*dt*(c1+c2+c3);
}
}
//a#############################################################################################
void check_gpu_error (const char *msg)
/*< check GPU errors >*/
{
cudaError_t err = cudaGetLastError ();
if (cudaSuccess != err) {
printf ("Cuda error: %s: %s", msg, cudaGetErrorString (err));
exit(0);
}
}
//a#############################################################################################
void window3d(float *a, float *b, int n1, int n2, int n3)
/*< window a 3d subvolume >*/
{
int i1, i2, i3, nn1, nn2;
nn1=n1+2*mm+ 2*npd;//z
nn2=n2+2*mm+ 2*npd;//x
for(i3=0; i3<n3; i3++)
for(i2=0; i2<n2; i2++)
for(i1=0; i1<n1; i1++)
{
a[i1+n1*i2+n1*n2*i3]=b[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)];
}
}
//a#############################################################################################
void velocity_transform(float *v, float*vv, float dt, int n1, int n2, int n3)
/*< velocity transform: pad v into vv with mm+npd ghost/absorbing cells on every face (dt is unused here) >*/
{
int i1, i2, i3, nn1, nn2, nn3;
float tmp;
nn1=n1+2*mm+2*npd;
nn2=n2+2*mm+2*npd;
nn3=n3+2*mm+2*npd;
// inner zone
for(i3=0; i3<n3; i3++){//y
for(i2=0; i2<n2; i2++){//x
for(i1=0; i1<n1; i1++){//z
tmp=v[i1+n1*i2+n1*n2*i3];
vv[(i1+mm+npd)+nn1*(i2+mm+npd)+nn1*nn2*(i3+mm+npd)]=tmp;
}
}
}
//top & down
for(i3=0; i3<nn3; i3++){//y
for(i2=0; i2<nn2; i2++){//x
for (i1=0; i1<mm+npd; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[mm+npd+nn1*i2+nn1*nn2*i3];
vv[(nn1-i1-1)+nn1*i2+nn1*nn2*i3]=vv[(nn1-mm-npd-1)+nn1*i2+nn1*nn2*i3];
}
}
}
//left & right
for(i3=0; i3<nn3; i3++){//y
for(i2=0; i2<mm+npd; i2++){//x
for (i1=0; i1<nn1; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*(mm+npd)+nn1*nn2*i3];
vv[i1+nn1*(nn2-i2-1)+nn1*nn2*i3]=vv[i1+nn1*(nn2-mm-npd-1)+nn1*nn2*i3];
}
}
}
//front & back
for(i3=0; i3<mm+npd; i3++){//y
for(i2=0; i2<nn2; i2++){//x
for(i1=0; i1<nn1; i1++){//z
vv[i1+nn1*i2+nn1*nn2*i3]=vv[i1+nn1*i2+nn1*nn2*(mm+npd)];
vv[i1+nn1*i2+nn1*nn2*(nn3-1-i3)]=vv[i1+nn1*i2+nn1*nn2*(nn3-mm-npd-1)];
}
}
}
}
//a#############################################################################################
__global__ void cuda_add_source(bool add, float *p, float *source, int *szxy, int ns)
/*< add/subtract sources: length of source[]=ns, index stored in szxy[] >*/
{
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(id<ns){
if(add){
p[szxy[id]]+=source[id];
}else{
p[szxy[id]]-=source[id];
}
}
}
//a#############################################################################################
__global__ void cuda_record(float *P, float *seis, int *gxz, int ng, int it, int nt, bool record)//++++++++++++
/*< record the seismogram at time it >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if (id<ng)
{
if(record) seis[it+id*nt]=P[gxz[id]];
else P[gxz[id]]=seis[it+id*nt];
}
}
//a#############################################################################################
__global__ void cuda_cal_illum(float *s, float *p, int nz, int nx, int ny)
/*< calculate the source lighting matrix >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) s[id]+=p[id]*p[id];
}
}
//a#############################################################################################
__global__ void cuda_illum(float *g1, float *illum, int nz, int nx, int ny)
/*< normalize the gradient by the source illumination: g1 /= illum >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny&&illum[id]!=0) g1[id]/=illum[id];
}
}
//a#############################################################################################
__global__ void cuda_sum(float *ns, float *is, int nz, int nx, int ny)
/*< stack the per-shot array is[] into the total ns[] >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) ns[id]+=is[id];
}
}
//a#############################################################################################
__global__ void cuda_cal_g1(float *g1, float *s, float *g, int nz, int nx, int ny)
/*< accumulate the per-shot gradient: g1 += s * g (cross-correlation of the two wavefields) >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnz*nnx*nny) g1[id]+=s[id]*g[id];
}
}
//a#############################################################################################
__global__ void cuda_absorb_bndr(float *P,float *Q,int nz,int nx,int ny,float qp)
/*< absorbing boundary condition >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int nny=ny+2*mm+2*npd;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
/*< front & back (0<y<ny) >*/
if ( iy < npd ){
P[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-iy)/(1.0*npd),2) + 1 )*Q[id];
}else if ( iy >= 2*mm + npd + ny ){
P[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((iy-2*mm-npd-ny)/(1.0*npd),2) + 1 )*Q[id];
}
/*< left & right (0<x<nx) >*/
if ( ix < npd ){
P[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-ix)/(1.0*npd),2) + 1 )*Q[id];
}else if ( ix >= 2*mm + npd + nx ){
P[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((ix-2*mm-npd-nx)/(1.0*npd),2) + 1 )*Q[id];
}
/*< up & down (0<z<nz) >*/
if ( iz < npd ){
P[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((npd-iz)/(1.0*npd),2) + 1 )*Q[id];
}else if ( iz >= 2*mm + npd + nz ){
P[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*P[id];
Q[id]=( qp*pow((iz-2*mm-npd-nz)/(1.0*npd),2) + 1 )*Q[id];
}
}
}
//a#############################################################################################
__global__ void cuda_cal_residuals(float *obj, float *cal, float *obs, float *com, int nn, int nx, int ny, int nt)
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt){
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
            if (id<nn)
            {
                com[id]=cal[id] - obs[id];
                atomicAdd(obj, com[id]*com[id]);/* guard the read of com[id] and accumulate the misfit without a race on *obj */
            }
}
}
}
//a#############################################################################################
__global__ void cuda_ricker_wavelet(float *wlt, float favg, float dt, int nt, float pfac)
/*< generate ricker wavelet with time delay >*/
{
int it=threadIdx.x+blockDim.x*blockIdx.x;
if (it<nt){
float tmp = PI*favg*fabsf(it*dt-1.0/favg);//delay the wavelet to exhibit all waveform
tmp *=tmp;
wlt[it]= (1.0-2.0*tmp)*expf(-tmp);// ricker wavelet at time: t=nt*dt
}
}
//a#############################################################################################
__global__ void cuda_set_s(int *szxy, int fsz, int fsx, int fsy, int dsz, int dsx, int dsy, int ns, int nsx, int nz, int nx, int ny)
/*< set the positions of sources in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int ixs=id%nsx;
int iys=id/nsx;
if (id<ns) szxy[id]=(fsz+mm+npd)+nnz*(fsx+ixs*dsx+mm+npd)+nnz*nnx*(fsy+iys*dsy+mm+npd);
}
//a#############################################################################################
__global__ void cuda_set_up_do(int *gzxy, int *up, int *down, int ng, int nz, int nx, int ny)
/*< set the positions of geophones & down in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int iy=id/nx;
int ix=id%nx;
if (id<ng){
gzxy[id]=(mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
up[id]=(mm+npd-1)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
down[id]=(nz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(iy+mm+npd);
}
}
//a#############################################################################################
__global__ void cuda_set_fr_ba(int *front, int *back, int ng, int nz, int nx, int ny)
/*< set the positions of front & back in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int ix=id/nz;
int iz=id%nz;
if (id<ng){
front[id]=(iz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(mm+npd-1);
back[id]=(iz+mm+npd)+nnz*(ix+mm+npd)+nnz*nnx*(ny+mm+npd);
}
}
//a#############################################################################################
__global__ void cuda_set_le_ri(int *left, int *right,int ng, int nz,int nx, int ny)
/*< set the positions of left & right in whole domain >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
int nnz=nz+2*mm+2*npd;
int nnx=nx+2*mm+2*npd;
int iy=id/nz;
int iz=id%nz;
if (id<ng){
left[id]=(iz+mm+npd)+nnz*(mm+npd-1)+nnz*nnx*(iy+mm+npd);
right[id]=(iz+mm+npd)+nnz*(nx+mm+npd)+nnz*nnx*(iy+mm+npd);
}
}
//a#############################################################################################
__global__ void cuda_save_bndr(float *bndr, float *p0, int *front, int *back, int *left, int *right, int *up, int *down,
int nz, int nx, int ny, bool write)//(2*nz*nx+2*nz*ny+nx*ny)
/*< write boundaries out or read them into wavefield variables p>*/
{
int id=threadIdx.x+blockIdx.x*blockDim.x;
if(write){
if(id<nz*nx)
bndr[id]=p0[front[id]]; /* front boundary */
else if((id>=nz*nx)&&(id<2*nz*nx))
bndr[id]=p0[back[id-nz*nx]]; /* back boundary */
else if((id>=2*nz*nx)&&(id<(2*nz*nx+nz*ny)))
bndr[id]=p0[left[id-2*nz*nx]]; /* left boundary */
else if((id>=(2*nz*nx+nz*ny))&&(id<(2*nz*nx+2*nz*ny)))
bndr[id]=p0[right[id-2*nz*nx-nz*ny]]; /* right boundary */
else if((id>=(2*nz*nx+2*nz*ny))&&(id<(2*nz*nx+2*nz*ny+nx*ny)))
bndr[id]=p0[up[id-2*nz*nx-2*nz*ny]]; /* up boundary */
else if((id>=(2*nz*nx+2*nz*ny+nx*ny))&&(id<(2*nz*nx+2*nz*ny+2*nx*ny)))
bndr[id]=p0[down[id-2*nz*nx-2*nz*ny-nx*ny]];/* down boundary */
}else{
if(id<nz*nx)
p0[front[id]]=bndr[id]; /* front boundary */
else if((id>=nz*nx)&&(id<2*nz*nx))
p0[back[id-nz*nx]]=bndr[id]; /* back boundary */
else if((id>=2*nz*nx)&&(id<(2*nz*nx+nz*ny)))
p0[left[id-2*nz*nx]]=bndr[id]; /* left boundary */
else if((id>=(2*nz*nx+nz*ny))&&(id<(2*nz*nx+2*nz*ny)))
p0[right[id-2*nz*nx-nz*ny]]=bndr[id]; /* right boundary */
else if((id>=(2*nz*nx+2*nz*ny))&&(id<(2*nz*nx+2*nz*ny+nx*ny)))
p0[up[id-2*nz*nx-2*nz*ny]]=bndr[id]; /* up boundary */
else if((id>=(2*nz*nx+2*nz*ny+nx*ny))&&(id<(2*nz*nx+2*nz*ny+2*nx*ny)))
p0[down[id-2*nz*nx-2*nz*ny-nx*ny]]=bndr[id]; /* down boundary */
}
}
//a#############################################################################################
__global__ void cuda_scale_gradient(float *g1, float *VV, float *illum, int nnx, int nny, int nnz, bool precon)
/*< scale g1 >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float a=VV[id];
if (precon) a*=sqrtf(illum[id]+EPS);/*precondition with residual wavefield illum*/
g1[id]*=2.0/a;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothz(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(iz+i>=0 && iz+i<nnz) s+=expf(-(2.0*i*i)/rbell)*g1[id+i];
g1[id]=s;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothx(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(ix+i>=0 && ix+i<nnx) s+=expf(-(2.0*i*i)/rbell)*g1[id+i*nnz];
g1[id]=s;
}
}
}
//a#############################################################################################
__global__ void cuda_bell_smoothy(float *g1, int rbell, int nnx, int nny, int nnz)
/*< smoothing with gaussian function >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int i,id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float s=0.0;
for(i=-rbell; i<=rbell; i++) if(iy+i>=0 && iy+i<nny) s+=expf(-(2.0*i*i)/rbell)*g1[id+i*nnz*nnx];
g1[id]=s;
}
}
}
//a#############################################################################################
__global__ void cuda_cal_beta_step1(float *g0, float *g1, float *cg, int nnx, int nny, int nnz)
/*< calculate beta for the nonlinear conjugate gradient algorithm, step 1:
    accumulate s = sum g1*(g1-g0), t = sum cg*(g1-g0), r = sum g1*g1 over the model
    (launched with the same 2-D model grid as the other kernels) >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
s=0.0,t=0.0,r=0.0;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
float a=g0[id];
float b=g1[id];
float c=cg[id];
/* HS: Hestenses-Stiefel NLCG algorithm */
s += b*(b-a); // numerator of HS
t += c*(b-a); // denominator of HS,DY
r += b*b; // numerator of DY
}
}
}
//a#############################################################################################
__global__ void cuda_cal_beta_step2(float *beta, int nnx, int nny, int nnz)
/*< step 2: combine s, t, r into the hybrid HS-DY beta = max(0, min(s/t, r/t)) >*/
{
int id=threadIdx.x+blockDim.x*blockIdx.x;
if(id<1)
{
float beta_HS=0.0;
float beta_DY=0.0;
if(t!=0)
{
beta_HS=s/t;
beta_DY=r/t;
}
*beta=max(0.0, min(beta_HS, beta_DY));/* Hybrid HS-DY method combined with iteration restart */
}
}
//a#############################################################################################
__global__ void cuda_cal_conjgrad(float *g1, float *cg, float beta, int nnx, int nny, int nnz)
/*< calculate nonlinear conjugate gradient >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
cg[id] = -g1[id]+beta*cg[id];
}
}
}
//a#############################################################################################
__global__ void cuda_cal_epsilon(float *VV, float *cg, float *epsil, int N)
/*< calculate estimated stepsize (epsil) according to Taratola's method
configuration requirement: <<<1, Block_Size>>> >*/
{
__shared__ float sdata[BlockSize];/* find max(|vv(:)|) */
__shared__ float tdata[BlockSize];/* find max(|cg(:)|) */
int tid = threadIdx.x;
sdata[tid] = 0.0f;
tdata[tid] = 0.0f;
for(int s=0; s<(N+BlockSize-1)/BlockSize; s++)
{
int id=s*blockDim.x+threadIdx.x;
float a=(id<N)?fabsf(VV[id]):0.0f;
float b=(id<N)?fabsf(cg[id]):0.0f;
sdata[tid]= max(sdata[tid], a);
tdata[tid]= max(tdata[tid], b);
}
__syncthreads();
/* do reduction in shared mem */
for(int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s) {sdata[tid]=max(sdata[tid], sdata[tid+s]);tdata[tid]=max(tdata[tid], tdata[tid+s]);}
__syncthreads();
}
if (tid < 32)
{
if (blockDim.x >= 64) { sdata[tid] =max(sdata[tid],sdata[tid + 32]);tdata[tid]=max(tdata[tid], tdata[tid+32]);}
if (blockDim.x >= 32) { sdata[tid] =max(sdata[tid],sdata[tid + 16]);tdata[tid]=max(tdata[tid], tdata[tid+16]);}
if (blockDim.x >= 16) { sdata[tid] =max(sdata[tid],sdata[tid + 8]);tdata[tid]=max(tdata[tid], tdata[tid+8]);}
if (blockDim.x >= 8) { sdata[tid] =max(sdata[tid],sdata[tid + 4]);tdata[tid]=max(tdata[tid], tdata[tid+4]);}
if (blockDim.x >= 4) { sdata[tid] =max(sdata[tid],sdata[tid + 2]);tdata[tid]=max(tdata[tid], tdata[tid+2]);}
if (blockDim.x >= 2) { sdata[tid] =max(sdata[tid],sdata[tid + 1]);tdata[tid]=max(tdata[tid], tdata[tid+1]);}
}
if (tid == 0) { if(tdata[0]>EPS) *epsil=0.01*sdata[0]/tdata[0]; else *epsil=0.0;}
}
//a#############################################################################################
__global__ void cuda_com2derr(float *com, float *derr, int nx, int ny, int nt)
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt){
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
if (id<nx*ny*nt) derr[id]=com[id];
}
}
}
//a#############################################################################################
__global__ void cuda_cal_vtmp(float *VVtmp, float *VV, float *cg, float epsil, int nnx, int nny, int nnz)
/*< calculate temporary velocity >*/
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz)
{
VVtmp[id] =VV[id] + epsil*cg[id];
}
}
}
//a#############################################################################################
__global__ void cuda_sum_alpha12(float *alpha1, float *alpha2, float *cal, float *obs, float *derr,
int nx, int ny, int nz, int nt)
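/*< accumulate per-receiver numerator (alpha1) and denominator (alpha2) for the alpha estimate >*/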
{
const int it = blockIdx.x * blockDim.x + threadIdx.x;//0--nt's thread:it
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id, iy;
if(it<nt)
{
for(iy=0;iy<ny;iy++)
{
id=it+ix*nt+iy*nt*nx;
if (id<nx*ny*nt)
{
float c=derr[id];
float a=obs[id]+c;/* since derr[id]=f(mk)-dobs[id], f(mk)=dobs[id]+derr[id] */
float b=cal[id]-a;/* f(mk+epsil*cg)-f(mk) */
alpha1[ix+nx*iy]-=b*c;
alpha2[ix+nx*iy]+=b*b;
}
}
}
}
//a#############################################################################################
__global__ void cuda_cal_alpha(float *alpha, float *alpha1, float *alpha2, float epsil, int ng)
/*< calculate searched stepsize (alpha) according to Tarantola's method
configuration requirement: <<<1, Block_Size>>> >*/
{
__shared__ float sdata[BlockSize];
__shared__ float tdata[BlockSize];
int tid=threadIdx.x;
sdata[tid]=0.0f;
tdata[tid]=0.0f;
for(int s=0; s<(ng+BlockSize-1)/BlockSize; s++)
{
int id=s*blockDim.x+threadIdx.x;
float a=(id<ng)?alpha1[id]:0.0f;
float b=(id<ng)?alpha2[id]:0.0f;
sdata[tid] +=a;
tdata[tid] +=b;
}
__syncthreads();
/* do reduction in shared mem */
for(int s=blockDim.x/2; s>32; s>>=1)
{
if (threadIdx.x < s) { sdata[tid] += sdata[tid + s];tdata[tid] += tdata[tid + s]; } __syncthreads();
}
if (tid < 32)
{
if (blockDim.x >= 64) { sdata[tid] += sdata[tid + 32]; tdata[tid] += tdata[tid + 32];}
if (blockDim.x >= 32) { sdata[tid] += sdata[tid + 16]; tdata[tid] += tdata[tid + 16];}
if (blockDim.x >= 16) { sdata[tid] += sdata[tid + 8]; tdata[tid] += tdata[tid + 8];}
if (blockDim.x >= 8) { sdata[tid] += sdata[tid + 4]; tdata[tid] += tdata[tid + 4];}
if (blockDim.x >= 4) { sdata[tid] += sdata[tid + 2]; tdata[tid] += tdata[tid + 2];}
if (blockDim.x >= 2) { sdata[tid] += sdata[tid + 1]; tdata[tid] += tdata[tid + 1];}
}
if (tid == 0)
{
if(tdata[0]>EPS) *alpha=epsil*sdata[0]/(tdata[0]+EPS);
else *alpha=0.0;
}
}
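/* Hedged note: together with cuda_sum_alpha12 this implements Tarantola's
   step-length estimate
       alpha = epsil * sum_g <dobs - f(m), f(m+epsil*cg) - f(m)>
                     / sum_g |f(m+epsil*cg) - f(m)|^2 ,
   where alpha1/alpha2 hold the per-receiver numerator/denominator and this
   kernel sums them over the ng receivers. The same warp-synchronous caveat as
   in cuda_cal_epsilon applies to the tid<32 unrolled section. */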
//a#############################################################################################
__global__ void cuda_update_vel(float *VV, float *cg, float alpha, int nnx, int nny, int nnz)
{
const int iz = blockIdx.x * blockDim.x + threadIdx.x;//0--nz's thread:iz
const int ix = blockIdx.y * blockDim.y + threadIdx.y;//0--nx's thread:ix
int id,iy;
for(iy=0;iy<nny;iy++)
{
id=iz+ix*nnz+iy*nnz*nnx;
if(id<nnx*nny*nnz) VV[id]=VV[id]+alpha*cg[id];
}
}
//a#############################################################################################
//a### ###
//a### Main Function ###
//a### ###
//a#############################################################################################
int main(int argc, char* argv[])
{
int nz, nx, ny, nnz, nnx, nny, ns, nsx, nt, it, is, fsz, fsx, fsy, dsz, dsx, dsy, ng, iter, niter;
int *coo_source, *coo_receivers, *coo_up, *coo_down, *coo_front, *coo_back, *coo_left, *coo_right;
float dz, dx, dy, favg, dt, _dz2, _dx2, _dy2, pfac;
float *v, *vv, *wavelet, *VV, *VVtmp, *s_P0, *s_P1, *ptr, *g_P0, *g_P1, *s_Ptt;
float *p_cal, *p_IO, *p_obs, *p_com, *s_bndr, *p_derr;
float *g0, *g1, *cg, *illum, *pars;
float obj1, obj, beta, epsil, alpha, *alpha1, *alpha2, *objval;
//a######################################
char FNvel[250]={"vel201202203initial.dat"};
char FNsobs[250]={"shot_obs.dat"};
char FNscal[250]={"shot_cal.dat"};
char FNscom[250]={"shot_com.dat"};
char FNgrad[250]={"gradient.dat"};
char FNillum[250]={"illum.dat"};
char FNupdatevel[250]={"velupdate.dat"};
char FNlastvel[250]={"vellastIter.dat"};
char FNobjs[250]={"objections.txt"};
//a######################################
nx=201; dx=10;
ny=1; dy=10;
nz=203; dz=10;
nt=1501; favg=20; pfac=100;
dt=0.001;
ns=5; nsx=5;
fsx=10; dsx=40;
fsy=1; dsy=0;
fsz=1; dsz=0;
niter=50;
//a######################################
FILE *fpvel, *fpscal, *fpsobs, *fpgrad, *fpscom, *fpillum, *fpupdatevel, *fplastvel, *fpobjs;
if((fpvel=fopen(FNvel,"rb"))==NULL){printf("### < %s > read error!\n",FNvel);exit(0);}
if((fpsobs=fopen(FNsobs,"rb"))==NULL){printf("### < %s > read error!\n",FNsobs);exit(0);}
fpscal=fopen(FNscal,"wb");
fpscom=fopen(FNscom,"wb");
fpgrad=fopen(FNgrad,"wb");
fpillum=fopen(FNillum,"wb");
fpupdatevel=fopen(FNupdatevel,"wb");
fplastvel=fopen(FNlastvel,"wb");
fpobjs=fopen(FNobjs,"w");
//a######################################
_dz2=1.0/(dz*dz);
_dx2=1.0/(dx*dx);
_dy2=1.0/(dy*dy);
nnz=nz+2*mm+2*npd;
nnx=nx+2*mm+2*npd;
nny=ny+2*mm+2*npd;
ng=nx*ny;
//a######################################
v=(float*)malloc(nz*nx*ny*sizeof(float));
vv=(float*)malloc(nnz*nnx*nny*sizeof(float));
p_IO=(float*)malloc(ng*nt*sizeof(float));
objval=(float*)malloc(niter*sizeof(float));
memset(p_IO, 0, ng*nt*sizeof(float));
memset(objval, 0, niter*sizeof(float));
fread(v, sizeof(float), nz*nx*ny, fpvel);
velocity_transform(v, vv, dt, nz, nx, ny);
/*< initialize device, default device=0 >*/
cudaSetDevice(0);
check_gpu_error("Failed to initialize device!");
dim3 dimg, dimb, dimt;
dimg.x=(nz+2*npd+2*mm+BlockSize1-1)/BlockSize1;
dimg.y=(nx+2*npd+2*mm+BlockSize2-1)/BlockSize2;
dimt.x=(nt+BlockSize1-1)/BlockSize1;
dimt.y=(nx+BlockSize2-1)/BlockSize2;
dimb.x=BlockSize1;
dimb.y=BlockSize2;
/* allocate memory on device */
/*< wavelet & velocity >*/
cudaMalloc(&wavelet, nt*sizeof(float));
cudaMalloc(&VV, nnz*nnx*nny*sizeof(float));
cudaMalloc(&VVtmp, nnz*nnx*nny*sizeof(float));
/*< forward & backward & receivers wavefield >*/
cudaMalloc(&s_P0, nnz*nnx*nny*sizeof(float));
cudaMalloc(&s_P1, nnz*nnx*nny*sizeof(float));
cudaMalloc(&g_P0, nnz*nnx*nny*sizeof(float));
cudaMalloc(&g_P1, nnz*nnx*nny*sizeof(float));
cudaMalloc(&s_Ptt, nnz*nnx*nny*sizeof(float));
/*< shot & receivers location >*/
cudaMalloc(&coo_source, ns*sizeof(int));
cudaMalloc(&coo_receivers, ng*sizeof(int));
/*< boundary location >*/
cudaMalloc(&coo_up , nx*ny*sizeof(int));
cudaMalloc(&coo_down , nx*ny*sizeof(int));
cudaMalloc(&coo_front, nx*nz*sizeof(int));
cudaMalloc(&coo_back , nx*nz*sizeof(int));
cudaMalloc(&coo_left , ny*nz*sizeof(int));
cudaMalloc(&coo_right, ny*nz*sizeof(int));
/*< calculated/observed/residual seismic data (ng x nt), line-search sums & the six boundary planes >*/
cudaMalloc(&p_cal, ng*nt*sizeof(float));
cudaMalloc(&p_obs, ng*nt*sizeof(float));
cudaMalloc(&p_com, ng*nt*sizeof(float));
cudaMalloc(&p_derr, ng*nt*sizeof(float));
cudaMalloc(&alpha1, ng*sizeof(float));
cudaMalloc(&alpha2, ng*sizeof(float));
cudaMalloc(&s_bndr, nt*(2*nz*nx+2*nz*ny+2*nx*ny)*sizeof(float));
/*< previous & current gradients, conjugate direction & illumination matrix >*/
cudaMalloc(&g0, nnz*nnx*nny*sizeof(float));
cudaMalloc(&g1, nnz*nnx*nny*sizeof(float));
cudaMalloc(&cg, nnz*nnx*nny*sizeof(float));
cudaMalloc(&illum, nnz*nnx*nny*sizeof(float));
cudaMemset(g1, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(cg, 0, nnz*nnx*nny*sizeof(float));
/* d_pars[0]: obj; d_pars[1]: beta; d_pars[2]: epsilon; d_pars[3]: alpha; */
cudaMalloc(&pars, 4*sizeof(float));
cudaMemset(pars, 0, 4*sizeof(float));
check_gpu_error("Failed to allocate memory for variables!");
cuda_ricker_wavelet<<<(nt+BlockSize-1)/BlockSize, BlockSize>>>(wavelet, favg, dt, nt, pfac);
cudaMemcpy(VV, vv, nnz*nnx*nny*sizeof(float), cudaMemcpyHostToDevice);
/*< shot location >*/
cuda_set_s<<<1, ns>>>(coo_source, fsz, fsx, fsy, dsz, dsx, dsy, ns, nsx, nz, nx, ny);
/*< receivers(up),down,front,back,left,right location >*/
cuda_set_up_do<<<(nx*ny+BlockSize-1)/BlockSize,BlockSize>>>(coo_receivers, coo_up,coo_down, nx*ny, nz, nx, ny);
cuda_set_fr_ba<<<(nz*nx+BlockSize-1)/BlockSize,BlockSize>>>(coo_front, coo_back, nz*nx, nz, nx, ny);
cuda_set_le_ri<<<(nz*ny+BlockSize-1)/BlockSize,BlockSize>>>(coo_left, coo_right, nz*ny, nz, nx, ny);
clock_t iter_t0, iter_t1, is_t0, is_t1, ns_t0, ns_t1;
printf("##########################################\n");
printf("###\n");
for(iter=0; iter<niter; iter++)
{
iter_t0=clock();
printf("########## Iter =%3d ########## \n###\n",iter+1);
cudaMemcpy(g0, g1, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemset(g1, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(illum, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(p_derr, 0, ng*nt*sizeof(float));
cudaMemset(alpha1, 0, ng*sizeof(float));
cudaMemset(alpha2, 0, ng*sizeof(float));
cudaMemset(pars, 0, 4*sizeof(float));
rewind(fpscal);
rewind(fpsobs);
rewind(fpscom);
rewind(fpillum);
rewind(fpgrad);
ns_t0=clock();
for(is=0; is<ns; is++)
{
is_t0=clock();
cudaMemset(s_P0, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(s_P1, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(g_P0, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(g_P1, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(s_Ptt, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(p_cal, 0, ng*nt*sizeof(float));
cudaMemset(p_obs, 0, ng*nt*sizeof(float));
cudaMemset(p_com, 0, ng*nt*sizeof(float));
cudaMemset(s_bndr, 0, nt*(2*nz*nx+2*nz*ny+2*nx*ny)*sizeof(float));
for(it=0; it<nt; it++)
{
//if(it%400==0) printf("For: is=%2d, it=%d\n",is,it);
cuda_add_source<<<1,1>>>(true, s_P1, &wavelet[it], &coo_source[is], 1);
cuda_step_fd3d<<<dimg,dimb>>>(s_P0, s_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
cuda_absorb_bndr<<<dimg,dimb>>>(s_P0, s_P1, nz, nx, ny, -0.25);
cuda_save_bndr<<<((2*nz*nx+2*nz*ny+2*nx*ny)+BlockSize-1)/BlockSize,BlockSize>>>(
&s_bndr[it*(2*nz*nx+2*nz*ny+2*nx*ny)],
s_P0, coo_front, coo_back, coo_left, coo_right, coo_up, coo_down,
nz, nx, ny, true);
cuda_cal_illum<<<dimg,dimb>>>(illum, s_P0, nz, nx, ny);
cuda_record<<<(ng+BlockSize-1)/BlockSize, BlockSize>>>(s_P0, p_cal, coo_receivers, ng, it, nt, true);
}//it loop end
cudaMemcpy(p_IO, p_cal, ng*nt*sizeof(float), cudaMemcpyDeviceToHost);
fwrite(p_IO, sizeof(float), ng*nt, fpscal);
fseek(fpsobs,is*ng*nt*sizeof(float),0);
fread(p_IO, sizeof(float), ng*nt, fpsobs);
cudaMemcpy(p_obs, p_IO, ng*nt*sizeof(float), cudaMemcpyHostToDevice);
cuda_cal_residuals<<<dimt, dimb>>>(&pars[0], p_cal, p_obs, p_com, ng*nt, nx, ny, nt);
if(is==0)cuda_com2derr<<<dimt, dimb>>>(p_com, p_derr, nx, ny, nt);
cudaMemcpy(&obj, &pars[0], sizeof(float), cudaMemcpyDeviceToHost);
if(is==(ns/2+1)){ cudaMemcpy(p_IO, p_com, ng*nt*sizeof(float), cudaMemcpyDeviceToHost);
fseek(fpscom,is*ng*nt*sizeof(float),0);
fwrite(p_IO, sizeof(float), ng*nt, fpscom); }
for(it=nt-1; it>-1; it--)
{
//if(it%400==0) printf("Back: is=%2d, it=%d\n",is,it);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
cuda_save_bndr<<<((2*nz*nx+2*nz*ny+2*nx*ny)+BlockSize-1)/BlockSize,BlockSize>>>(
&s_bndr[it*(2*nz*nx+2*nz*ny+2*nx*ny)],
s_P1, coo_front, coo_back, coo_left, coo_right, coo_up, coo_down,
nz, nx, ny, false);
cuda_step_fd3d<<<dimg,dimb>>>(s_P0, s_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, s_Ptt, true);
cuda_absorb_bndr<<<dimg,dimb>>>(s_P0, s_P1, nz, nx, ny, -0.25);
cuda_record<<<(ng+BlockSize-1)/BlockSize, BlockSize>>>(g_P1, p_com, coo_receivers, ng, it, nt, false);
cuda_step_fd3d<<<dimg,dimb>>>(g_P0, g_P1, VV, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=g_P0; g_P0=g_P1; g_P1=ptr;
cuda_absorb_bndr<<<dimg,dimb>>>(g_P0, g_P1, nz, nx, ny, -0.25);
cuda_cal_illum<<<dimg,dimb>>>(illum, g_P1, nz, nx, ny);
cuda_cal_g1<<<dimg,dimb>>>(g1, s_Ptt, g_P1, nz, nx, ny);
}// it loop end
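/* Hedged note on the backward sweep above: instead of storing full source-
   wavefield snapshots, the forward sweep keeps only the six boundary strips
   per time step (s_bndr via cuda_save_bndr), and this loop re-injects them to
   time-reverse s_P0/s_P1 while the data residual p_com is back-propagated
   into g_P0/g_P1. s_Ptt appears to hold the second time derivative of the
   reconstructed source wavefield, so cuda_cal_g1 accumulates the zero-lag
   cross-correlation gradient (g1 += s_Ptt * g_P1) and illum gathers
   source+receiver illumination used for preconditioning after the shot loop. */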
is_t1=clock();
printf("### IS:(%2d) %.2f(min);\n",is,((float)(is_t1-is_t0))/60000000.0);
}//IS loop end
ns_t1=clock();
printf("### Cal gradient: %.2f (min)\n",((float)(ns_t1-ns_t0))/60000000.0);
cudaMemcpy(vv, illum, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float), nz*nx*ny, fpillum);
/* compute the gradient of FWI by scaling, precondition incorporated here */
cuda_scale_gradient<<<dimg,dimb>>>(g1, VV, illum, nnx, nny, nnz, true);
/* Gaussian smoothing for the sharp gradient */
cuda_bell_smoothz<<<dimg,dimb>>>(g1, 2, nnx, nny, nnz);
cuda_bell_smoothx<<<dimg,dimb>>>(g1, 2, nnx, nny, nnz);
cuda_bell_smoothy<<<dimg,dimb>>>(g1, 2, nnx, nny, nnz);
/* calculate the factor beta in conjugate gradient method */
if (iter>0)
{
cuda_cal_beta_step1<<<dimg,dimb>>>(g0, g1, cg, nnx, nny, nnz);
cuda_cal_beta_step2<<<1,1>>>(&pars[1], nnx, nny, nnz);
}
cudaMemcpy(&beta, &pars[1], sizeof(float), cudaMemcpyDeviceToHost);
/* compute the conjugate gradient */
cuda_cal_conjgrad<<<dimg,dimb>>>(g1, cg, beta, nnx, nny, nnz);
cudaMemcpy(vv, cg, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float), nz*nx*ny, fpgrad);
/* estimate epsilon according to equation 11 */
cuda_cal_epsilon<<<1, BlockSize>>>(VV, cg, &pars[2], nnx*nnz*nny);
cudaMemcpy(&epsil, &pars[2], sizeof(float), cudaMemcpyDeviceToHost);
/* obtain a tentative velocity model to estimate a good stepsize alpha */
cuda_cal_vtmp<<<dimg,dimb>>>(VVtmp, VV, cg, epsil, nnx, nny, nnz);
ns_t0=clock();
printf("### Cal alpha:");
for(is=0; is<1; is++)
{
cudaMemset(s_P0, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(s_P1, 0, nnz*nnx*nny*sizeof(float));
cudaMemset(p_cal, 0, ng*nt*sizeof(float));
fseek(fpsobs,is*ng*nt*sizeof(float),0);
fread(p_IO, sizeof(float), ng*nt, fpsobs);
cudaMemcpy(p_obs, p_IO, ng*nt*sizeof(float), cudaMemcpyHostToDevice);
for(it=0; it<nt; it++)
{
cuda_add_source<<<1,1>>>(true, s_P1, &wavelet[it], &coo_source[is], 1);
cuda_step_fd3d<<<dimg,dimb>>>(s_P0, s_P1, VVtmp, _dz2, _dx2, _dy2, nz, nx, ny, dt, NULL, false);
ptr=s_P0; s_P0=s_P1; s_P1=ptr;
cuda_absorb_bndr<<<dimg,dimb>>>(s_P0, s_P1, nz, nx, ny, -0.25);
cuda_record<<<(ng+BlockSize-1)/BlockSize, BlockSize>>>(s_P0, p_cal, coo_receivers, ng, it, nt, true);
}//it loop end
cuda_sum_alpha12<<<dimt, dimb>>>(alpha1, alpha2, p_cal, p_obs, p_derr, nx, ny, nz, nt);
}//is loop end
cuda_cal_alpha<<<1,BlockSize>>>(&pars[3], alpha1, alpha2, epsil, ng);
cudaMemcpy(&alpha, &pars[3], sizeof(float), cudaMemcpyDeviceToHost);
ns_t1=clock();printf(" %.2f (min)\n",((float)(ns_t1-ns_t0))/60000000.0);
/* update the velocity model according to previous velocity, conjugate gradient and estimated stepsize */
cuda_update_vel<<<dimg,dimb>>>(VV, cg, alpha, nnx, nny, nnz);
cudaMemcpy(vv, VV, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
fwrite(v, sizeof(float),nz*nx*ny, fpupdatevel);
/* compute the normalized objective function */
if(iter==0) {obj1=obj; objval[iter]=1.0;}
else objval[iter]=obj/obj1;
iter_t1=clock();
printf("### objval=%f, beta=%f, epsil=%.2f, alpha=%.2f : %.2f(min)\n",
objval[iter],beta,epsil,alpha,((float)(iter_t1-iter_t0))/60000000.0);
fprintf(fpobjs,"iter=%3d, obj=%f;\n",iter+1,objval[iter]);
cudaMemcpy(vv, VV, nnz*nnx*nny*sizeof(float), cudaMemcpyDeviceToHost);
window3d(v, vv, nz, nx, ny);
rewind(fplastvel);
fwrite(v, sizeof(float),nz*nx*ny, fplastvel);
}//iter loop end
printf("##################################\n");
/* free memory on device */
cudaFree(wavelet);
cudaFree(VV);
cudaFree(VVtmp);
/*< wavefield(x-y-z) >*/
cudaFree(s_P0);
cudaFree(s_P1);
cudaFree(g_P0);
cudaFree(g_P1);
cudaFree(s_Ptt);
/*< location >*/
cudaFree(coo_source);
cudaFree(coo_receivers);
cudaFree(coo_front);
cudaFree(coo_back);
cudaFree(coo_left);
cudaFree(coo_right);
cudaFree(coo_down);
cudaFree(coo_up);
/*< gradient >*/
cudaFree(g0);
cudaFree(g1);
cudaFree(cg);
cudaFree(illum);
cudaFree(pars);
/*< wavefield(t-x-y-z) >*/
cudaFree(p_cal);
cudaFree(p_obs);
cudaFree(p_com);
cudaFree(p_derr);
cudaFree(alpha1);
cudaFree(alpha2);
cudaFree(s_bndr);
/*< free alloc >*/
free(v);
free(vv);
free(p_IO);
free(objval);
fclose(fpvel);
fclose(fpscal);
fclose(fpsobs);
fclose(fpscom);
fclose(fpgrad);
fclose(fpillum);
fclose(fpupdatevel);
fclose(fplastvel);
fclose(fpobjs);
exit (0);
}
|
ebfa4c441e2ee07060fdbd3517ffa6a14f2b8f39.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "wp_cuda.h"
hipDoubleComplex *dev_psi;
hipDoubleComplex *dev_psin1;
hipDoubleComplex *dev_psin2;
double *dev_x;
double *dev_dens;
double *dev_Vx;
UNINT Ncores;
__global__ void propagate_psi(hipDoubleComplex *psin_grid,
hipDoubleComplex *psi_grid, double *Vx_grid,
int n_grid, double u_term, double dt){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
hipDoubleComplex idt = make_cuDoubleComplex(0.0e0,-dt);
hipDoubleComplex u2VC;
hipDoubleComplex uC;
hipDoubleComplex s1;
hipDoubleComplex s2;
hipDoubleComplex aux1;
if (ind == 1){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = psi_grid[ind+1];
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
else if (ind == n_grid-2){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = psi_grid[ind-1];
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
else if (ind > 1 && ind < n_grid-2){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = cuCadd(psi_grid[ind-1], psi_grid[ind+1]);
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
return;
}
__global__ void update_psi(hipDoubleComplex *psi_grid,
hipDoubleComplex *psin1_grid,
hipDoubleComplex *psin2_grid,
int n_grid){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
hipDoubleComplex aux1;
hipDoubleComplex aux2;
hipDoubleComplex f_half= make_cuDoubleComplex(0.5e0,0.0e0);
if (ind > 0 && ind < n_grid-1){
aux1 = cuCmul(f_half, psin2_grid[ind]);
aux2 = cuCadd(psin1_grid[ind],aux1);
psi_grid[ind] = cuCadd(psi_grid[ind],aux2);
}
else if (ind == 0 || ind == n_grid-1){
psi_grid[ind] = make_cuDoubleComplex(0.0e0,0.0e0);
}
return;
}
__global__ void calc_dens(hipDoubleComplex *psi_grid, double *dens_grid,
int n_grid){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
double vabs;
if (ind > 0 && ind < n_grid){
vabs = cuCabs(psi_grid[ind]);
dens_grid[ind] = pow(vabs,2.0);
}
return;
}
void init_cuda_subs(complex<double> *psi_grid, double *x_grid,
complex<double> *psin1_grid,
complex<double> *psin2_grid,
double *dens_grid,
double *Vx_grid, UNINT n_grid){
double gaux = (double) n_grid;
double taux = (double) Nthreads;
Ncores = (UNINT) ceil(gaux/taux);
hipMalloc((void**) &dev_psi , n_grid * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_psin1 , n_grid * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_psin2 , n_grid * sizeof(hipDoubleComplex));
hipMalloc((void**) &dev_x , n_grid * sizeof(double));
hipMalloc((void**) &dev_dens , n_grid * sizeof(double));
hipMalloc((void**) &dev_Vx , n_grid * sizeof(double));
hipMemcpy(dev_psi, psi_grid, n_grid * sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_psin1, psin1_grid, n_grid * sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_psin2, psin2_grid, n_grid * sizeof(hipDoubleComplex),
hipMemcpyHostToDevice);
hipMemcpy(dev_x, x_grid, n_grid * sizeof(double), hipMemcpyHostToDevice);
hipMemcpy(dev_dens, dens_grid, n_grid * sizeof(double),
hipMemcpyHostToDevice);
hipMemcpy(dev_Vx, Vx_grid, n_grid * sizeof(double),
hipMemcpyHostToDevice);
return;
}
void propagate_cuda(UNINT n_grid, double mass, double dx, double dt){
double u_term = 0.5*(1.0/(mass*pow(dx,2.0)));
hipLaunchKernelGGL(( propagate_psi), dim3(Ncores), dim3(Nthreads), 0, 0, dev_psin1, dev_psi, dev_Vx, n_grid,
u_term, dt);
hipLaunchKernelGGL(( propagate_psi), dim3(Ncores), dim3(Nthreads), 0, 0, dev_psin2, dev_psin1, dev_Vx, n_grid,
u_term, dt);
hipLaunchKernelGGL(( update_psi), dim3(Ncores), dim3(Nthreads), 0, 0, dev_psi, dev_psin1,dev_psin2, n_grid);
return;
}
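/* Hedged note: propagate_cuda advances psi by one step of the second-order
   Taylor expansion of the propagator,
       psi(t+dt) ~= psi + (-i*dt*H)*psi + 0.5*(-i*dt*H)^2*psi,
   where H = -(1/(2m)) d^2/dx^2 + V(x) is discretized with the 3-point
   Laplacian (u_term = 1/(2*m*dx^2)). propagate_psi applies (-i*dt*H) once,
   so the two launches above produce psin1 and psin2 and update_psi forms the
   sum while zeroing the end points. A minimal host-side usage sketch, using
   only the entry points defined in this file (nsteps is a caller-chosen,
   hypothetical name):

       init_cuda_subs(psi, x, psin1, psin2, dens, Vx, n_grid);
       for (int step = 0; step < nsteps; ++step)
           propagate_cuda(n_grid, mass, dx, dt);
       get_cuda_psi2(psi, dens, n_grid);
       free_cuda_memory();
*/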
void get_cuda_psi2(complex<double> *psi_grid, double *dens_grid, int n_grid){
hipLaunchKernelGGL(( calc_dens), dim3(Ncores), dim3(Nthreads), 0, 0, dev_psi, dev_dens, n_grid);
hipMemcpy(dens_grid, dev_dens, n_grid*sizeof(double),
hipMemcpyDeviceToHost);
hipMemcpy(psi_grid, dev_psi, n_grid*sizeof(hipDoubleComplex),
hipMemcpyDeviceToHost);
return;
}
void free_cuda_memory(){
hipFree(dev_psi);
hipFree(dev_psin1);
hipFree(dev_psin2);
hipFree(dev_x);
hipFree(dev_dens);
hipFree(dev_Vx);
return;
}
| ebfa4c441e2ee07060fdbd3517ffa6a14f2b8f39.cu | #include "wp_cuda.h"
cuDoubleComplex *dev_psi;
cuDoubleComplex *dev_psin1;
cuDoubleComplex *dev_psin2;
double *dev_x;
double *dev_dens;
double *dev_Vx;
UNINT Ncores;
__global__ void propagate_psi(cuDoubleComplex *psin_grid,
cuDoubleComplex *psi_grid, double *Vx_grid,
int n_grid, double u_term, double dt){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
cuDoubleComplex idt = make_cuDoubleComplex(0.0e0,-dt);
cuDoubleComplex u2VC;
cuDoubleComplex uC;
cuDoubleComplex s1;
cuDoubleComplex s2;
cuDoubleComplex aux1;
if (ind == 1){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = psi_grid[ind+1];
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
else if (ind == n_grid-2){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = psi_grid[ind-1];
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
else if (ind > 1 && ind < n_grid-2){
u2VC = make_cuDoubleComplex(2*u_term+Vx_grid[ind],0.0e0);
uC = make_cuDoubleComplex(-u_term,0.0e0);
s1 = cuCadd(psi_grid[ind-1], psi_grid[ind+1]);
s1 = cuCmul(uC, s1);
s2 = cuCmul(u2VC, psi_grid[ind]);
aux1 = cuCadd(s1,s2);
aux1 = cuCmul(idt,aux1);
psin_grid[ind] = aux1;
}
return;
}
__global__ void update_psi(cuDoubleComplex *psi_grid,
cuDoubleComplex *psin1_grid,
cuDoubleComplex *psin2_grid,
int n_grid){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
cuDoubleComplex aux1;
cuDoubleComplex aux2;
cuDoubleComplex f_half= make_cuDoubleComplex(0.5e0,0.0e0);
if (ind > 0 && ind < n_grid-1){
aux1 = cuCmul(f_half, psin2_grid[ind]);
aux2 = cuCadd(psin1_grid[ind],aux1);
psi_grid[ind] = cuCadd(psi_grid[ind],aux2);
}
else if (ind == 0 || ind == n_grid-1){
psi_grid[ind] = make_cuDoubleComplex(0.0e0,0.0e0);
}
return;
}
__global__ void calc_dens(cuDoubleComplex *psi_grid, double *dens_grid,
int n_grid){
int ind = threadIdx.x + blockIdx.x * blockDim.x;
double vabs;
if (ind > 0 && ind < n_grid){
vabs = cuCabs(psi_grid[ind]);
dens_grid[ind] = pow(vabs,2.0);
}
return;
}
void init_cuda_subs(complex<double> *psi_grid, double *x_grid,
complex<double> *psin1_grid,
complex<double> *psin2_grid,
double *dens_grid,
double *Vx_grid, UNINT n_grid){
double gaux = (double) n_grid;
double taux = (double) Nthreads;
Ncores = (UNINT) ceil(gaux/taux);
cudaMalloc((void**) &dev_psi , n_grid * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_psin1 , n_grid * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_psin2 , n_grid * sizeof(cuDoubleComplex));
cudaMalloc((void**) &dev_x , n_grid * sizeof(double));
cudaMalloc((void**) &dev_dens , n_grid * sizeof(double));
cudaMalloc((void**) &dev_Vx , n_grid * sizeof(double));
cudaMemcpy(dev_psi, psi_grid, n_grid * sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_psin1, psin1_grid, n_grid * sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_psin2, psin2_grid, n_grid * sizeof(cuDoubleComplex),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_x, x_grid, n_grid * sizeof(double), cudaMemcpyHostToDevice);
cudaMemcpy(dev_dens, dens_grid, n_grid * sizeof(double),
cudaMemcpyHostToDevice);
cudaMemcpy(dev_Vx, Vx_grid, n_grid * sizeof(double),
cudaMemcpyHostToDevice);
return;
}
void propagate_cuda(UNINT n_grid, double mass, double dx, double dt){
double u_term = 0.5*(1.0/(mass*pow(dx,2.0)));
propagate_psi<<<Ncores, Nthreads>>>(dev_psin1, dev_psi, dev_Vx, n_grid,
u_term, dt);
propagate_psi<<<Ncores, Nthreads>>>(dev_psin2, dev_psin1, dev_Vx, n_grid,
u_term, dt);
update_psi<<<Ncores, Nthreads>>>(dev_psi, dev_psin1,dev_psin2, n_grid);
return;
}
void get_cuda_psi2(complex<double> *psi_grid, double *dens_grid, int n_grid){
calc_dens<<<Ncores, Nthreads>>>(dev_psi, dev_dens, n_grid);
cudaMemcpy(dens_grid, dev_dens, n_grid*sizeof(double),
cudaMemcpyDeviceToHost);
cudaMemcpy(psi_grid, dev_psi, n_grid*sizeof(cuDoubleComplex),
cudaMemcpyDeviceToHost);
return;
}
void free_cuda_memory(){
cudaFree(dev_psi);
cudaFree(dev_psin1);
cudaFree(dev_psin2);
cudaFree(dev_x);
cudaFree(dev_dens);
cudaFree(dev_Vx);
return;
}
|
5368f72a91fb873b91f7440474b66526ed7aa1bb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "THHUNN.h"
#include "THHDeviceTensor.cuh"
#include "THHDeviceTensorUtils.cuh"
#include "THHDeviceUtils.cuh"
#include "THHReduceApplyUtils.cuh"
__global__ void VolumetricReplicationPadding_updateOutput(
THCDeviceTensor<float, 5> input,
THCDeviceTensor<float, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.getSize(2) * output.getSize(3) *
output.getSize(4))) {
return;
}
int outputPointX = outputPointId % output.getSize(4);
int outputPointY = (outputPointId / output.getSize(4)) % output.getSize(3);
int outputPointZ = outputPointId / (output.getSize(3) * output.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
input.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
input.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
input.getSize(2) + pfront - 1) - oStartZ + iStartZ;
float valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
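/* Hedged note: for non-negative paddings the index arithmetic above reduces
   to clamping each output coordinate onto the valid input range, e.g.
   inputPointX = min(max(outputPointX - pleft, 0), inputW - 1), so every
   padded output voxel replicates its nearest edge voxel. The host wrapper
   below launches one thread per element of a (z,y,x) output plane, with
   blockIdx.y indexing the feature plane and blockIdx.z the batch entry. */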
void THNN_CudaVolumetricReplicationPadding_updateOutput(THCState *state,
THCudaTensor *input,
THCudaTensor *output,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numBatch = 1;
int numInputDims = THCudaTensor_nDimension(state, input);
THArgCheck(numInputDims == 4 || numInputDims == 5, 2,
"input must be 4 or 5-dimensional");
if (numInputDims == 5) {
numBatch = THCudaTensor_size(state, input, 0);
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = THCudaTensor_size(state, input, planeDim);
int inputD = THCudaTensor_size(state, input, dimd);
int inputH = THCudaTensor_size(state, input, dimh);
int inputW = THCudaTensor_size(state, input, dimw);
int outputD = inputD + pfront + pback;
int outputH = inputH + ptop + pbottom;
int outputW = inputW + pleft + pright;
THCDeviceTensor<float, 5> devInput;
THCDeviceTensor<float, 5> devOutput;
if (numInputDims == 4) {
THCudaTensor_resize4d(state, output, numPlanes, outputD, outputH, outputW);
devInput = toDeviceTensor<float, 4>(state, input).upcastOuter<5>();
devOutput = toDeviceTensor<float, 4>(state, output).upcastOuter<5>();
} else {
THCudaTensor_resize5d(state, output, numBatch, numPlanes, outputD, outputH,
outputW);
devInput = toDeviceTensor<float, 5>(state, input);
devOutput = toDeviceTensor<float, 5>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3) *
devOutput.getSize(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( VolumetricReplicationPadding_updateOutput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
__global__ void VolumetricReplicationPadding_updateGradInput(
THCDeviceTensor<float, 5> gradInput,
THCDeviceTensor<float, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.getSize(2) * gradOutput.getSize(3) *
gradOutput.getSize(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(4);
int outputPointY = (outputPointId / gradOutput.getSize(4)) %
gradOutput.getSize(3);
int outputPointZ = outputPointId / (gradOutput.getSize(3) *
gradOutput.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
gradInput.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
gradInput.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
gradInput.getSize(2) + pfront - 1) - oStartZ + iStartZ;
float valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
void THNN_CudaVolumetricReplicationPadding_updateGradInput(
THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, int pleft, int pright, int ptop, int pbottom,
int pfront, int pback) {
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, gradOutput),
3, "output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numInputDims = THCudaTensor_nDimension(state, input);
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
THCDeviceTensor<float, 5> devGradInput;
THCDeviceTensor<float, 5> devGradOutput;
if (numInputDims == 4) {
devGradInput = toDeviceTensor<float, 4>(state, gradInput).upcastOuter<5>();
devGradOutput =
toDeviceTensor<float, 4>(state, gradOutput).upcastOuter<5>();
} else {
devGradInput = toDeviceTensor<float, 5>(state, gradInput);
devGradOutput = toDeviceTensor<float, 5>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3) *
devGradOutput.getSize(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
hipLaunchKernelGGL(( VolumetricReplicationPadding_updateGradInput), dim3(gridSize), dim3(blockSize), 0, THCState_getCurrentStream(state),
devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
| 5368f72a91fb873b91f7440474b66526ed7aa1bb.cu | #include "THCUNN.h"
#include "THCDeviceTensor.cuh"
#include "THCDeviceTensorUtils.cuh"
#include "THCDeviceUtils.cuh"
#include "THCReduceApplyUtils.cuh"
__global__ void VolumetricReplicationPadding_updateOutput(
THCDeviceTensor<float, 5> input,
THCDeviceTensor<float, 5> output,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (output.getSize(2) * output.getSize(3) *
output.getSize(4))) {
return;
}
int outputPointX = outputPointId % output.getSize(4);
int outputPointY = (outputPointId / output.getSize(4)) % output.getSize(3);
int outputPointZ = outputPointId / (output.getSize(3) * output.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
input.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
input.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
input.getSize(2) + pfront - 1) - oStartZ + iStartZ;
float valueToCopy =
input[batch][plane][inputPointZ][inputPointY][inputPointX];
output[batch][plane][outputPointZ][outputPointY][outputPointX] = valueToCopy;
}
void THNN_CudaVolumetricReplicationPadding_updateOutput(THCState *state,
THCudaTensor *input,
THCudaTensor *output,
int pleft, int pright,
int ptop, int pbottom,
int pfront, int pback) {
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numBatch = 1;
int numInputDims = THCudaTensor_nDimension(state, input);
THArgCheck(numInputDims == 4 || numInputDims == 5, 2,
"input must be 4 or 5-dimensional");
if (numInputDims == 5) {
numBatch = THCudaTensor_size(state, input, 0);
planeDim++;
dimd++;
dimh++;
dimw++;
}
int numPlanes = THCudaTensor_size(state, input, planeDim);
int inputD = THCudaTensor_size(state, input, dimd);
int inputH = THCudaTensor_size(state, input, dimh);
int inputW = THCudaTensor_size(state, input, dimw);
int outputD = inputD + pfront + pback;
int outputH = inputH + ptop + pbottom;
int outputW = inputW + pleft + pright;
THCDeviceTensor<float, 5> devInput;
THCDeviceTensor<float, 5> devOutput;
if (numInputDims == 4) {
THCudaTensor_resize4d(state, output, numPlanes, outputD, outputH, outputW);
devInput = toDeviceTensor<float, 4>(state, input).upcastOuter<5>();
devOutput = toDeviceTensor<float, 4>(state, output).upcastOuter<5>();
} else {
THCudaTensor_resize5d(state, output, numBatch, numPlanes, outputD, outputH,
outputW);
devInput = toDeviceTensor<float, 5>(state, input);
devOutput = toDeviceTensor<float, 5>(state, output);
}
int outputPlaneSize = devOutput.getSize(2) * devOutput.getSize(3) *
devOutput.getSize(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devOutput.getSize(1),
devOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
VolumetricReplicationPadding_updateOutput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devInput, devOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
__global__ void VolumetricReplicationPadding_updateGradInput(
THCDeviceTensor<float, 5> gradInput,
THCDeviceTensor<float, 5> gradOutput,
int pfront, int pback, int ptop, int pbottom, int pleft, int pright) {
int outputPointId = threadIdx.x + blockIdx.x * blockDim.x;
int plane = blockIdx.y;
int batch = blockIdx.z;
if (outputPointId >= (gradOutput.getSize(2) * gradOutput.getSize(3) *
gradOutput.getSize(4))) {
return;
}
int outputPointX = outputPointId % gradOutput.getSize(4);
int outputPointY = (outputPointId / gradOutput.getSize(4)) %
gradOutput.getSize(3);
int outputPointZ = outputPointId / (gradOutput.getSize(3) *
gradOutput.getSize(4));
int iStartX = max(0, -pleft);
int iStartY = max(0, -ptop);
int iStartZ = max(0, -pfront);
int oStartX = max(0, pleft);
int oStartY = max(0, ptop);
int oStartZ = max(0, pfront);
int inputPointX = min(max(pleft, outputPointX),
gradInput.getSize(4) + pleft - 1) - oStartX + iStartX;
int inputPointY = min(max(ptop, outputPointY),
gradInput.getSize(3) + ptop - 1) - oStartY + iStartY;
int inputPointZ = min(max(pfront, outputPointZ),
gradInput.getSize(2) + pfront - 1) - oStartZ + iStartZ;
float valueToCopy =
gradOutput[batch][plane][outputPointZ][outputPointY][outputPointX];
atomicAdd(&gradInput[batch][plane][inputPointZ][inputPointY][inputPointX],
valueToCopy);
}
void THNN_CudaVolumetricReplicationPadding_updateGradInput(
THCState *state, THCudaTensor *input, THCudaTensor *gradOutput,
THCudaTensor *gradInput, int pleft, int pright, int ptop, int pbottom,
int pfront, int pback) {
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, input), 2,
"input tensor must fit into 32-bit index math");
THArgCheck(TensorUtils<THCudaTensor>::canUse32BitIndexMath(state, gradOutput),
3, "output gradient tensor must fit into 32-bit index math");
int planeDim = 0;
int dimd = 1;
int dimh = 2;
int dimw = 3;
int numInputDims = THCudaTensor_nDimension(state, input);
if (numInputDims == 5) {
planeDim++;
dimd++;
dimh++;
dimw++;
}
THCudaTensor_resizeAs(state, gradInput, input);
THCudaTensor_zero(state, gradInput);
THCDeviceTensor<float, 5> devGradInput;
THCDeviceTensor<float, 5> devGradOutput;
if (numInputDims == 4) {
devGradInput = toDeviceTensor<float, 4>(state, gradInput).upcastOuter<5>();
devGradOutput =
toDeviceTensor<float, 4>(state, gradOutput).upcastOuter<5>();
} else {
devGradInput = toDeviceTensor<float, 5>(state, gradInput);
devGradOutput = toDeviceTensor<float, 5>(state, gradOutput);
}
int outputPlaneSize = devGradOutput.getSize(2) * devGradOutput.getSize(3) *
devGradOutput.getSize(4);
dim3 gridSize(THCCeilDiv(outputPlaneSize, 256),
devGradOutput.getSize(1),
devGradOutput.getSize(0));
dim3 blockSize(outputPlaneSize > 256 ? 256 : outputPlaneSize);
VolumetricReplicationPadding_updateGradInput<<<gridSize, blockSize, 0, THCState_getCurrentStream(state)>>>(
devGradInput, devGradOutput, pfront, pback, ptop, pbottom, pleft, pright);
}
|
fb0926eccab1c452a53fd597fc04b479cae31164.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by Yurii Shyrma on 02.01.2018
//
#include <ops/declarable/helpers/stack.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/TAD.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void stackScalarsCuda(void* pVx, void* vz, const Nd4jLong* zShapeInfo) {
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[i]);
z[shape::getIndexOffset(i, zShapeInfo)] = *x;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void stackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
void* pVx, void* vz, const Nd4jLong* zShapeInfo) {
hipLaunchKernelGGL(( stackScalarsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, pVx, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void stack_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) {
const int numOfSubArrs = inArrs.size();
NDArray::prepareSpecialUse({&output}, inArrs);
if(inArrs[0]->rankOf() == 0) {
std::vector<void const*> hInBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hInBuffers[i] = inArrs[i]->specialBuffer();
PointersManager manager(context, "helpers::stack cuda");
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
stackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, output.specialBuffer(), output.specialShapeInfo());
manager.synchronize();
}
else {
auto zTadPack = ConstantTadHelper::getInstance()->tadForDimensions(output.shapeInfo(), ShapeUtils::evalDimsToExclude(output.rankOf(), {dim}));
auto zTadShapeInfo = zTadPack.primaryShapeInfo();
for (uint i = 0; i < numOfSubArrs; ++i) {
void* zBuff = output.specialBufferWithOffset(zTadPack.primaryOffsets()[i]);
NativeOpExecutioner::execTransformAny(context, transform::Assign,
nullptr, inArrs[i]->shapeInfo(), inArrs[i]->specialBuffer(), inArrs[i]->specialShapeInfo(),
nullptr, zTadShapeInfo, zBuff, zTadPack.specialShapeInfo(),
nullptr, nullptr, nullptr, false/*allowParallelism*/);
}
}
NDArray::registerSpecialUse({&output}, inArrs);
}
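///////////////////////////////////////////////////////////////////
// Hedged note: stack_ takes two paths. For rank-0 inputs it replicates an
// array of device buffer pointers and gathers them with a single
// stackScalarsCuda launch (one output element per scalar input); for
// higher-rank inputs it resolves the output TADs along `dim` and copies each
// input into its sub-array with NativeOpExecutioner::execTransformAny(Assign),
// so no dedicated stacking kernel is launched on that branch.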
////////////////////////////////////////////////////////////////////////
void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) {
BUILD_SINGLE_SELECTOR(output.dataType(), stack_, (context, inArrs, output, dim), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void stack_ , (sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void unstackScalarsCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz) {
const T* x = reinterpret_cast<const T*>(vx);
__shared__ Nd4jLong xLen, totalThreads;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < xLen; i += totalThreads) {
T* z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[i]);
*z = x[shape::getIndexOffset(i, xShapeInfo)];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void unstackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* pVz) {
hipLaunchKernelGGL(( unstackScalarsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, pVz);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void unstack_(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) {
const int numOfSubArrs = outArrs.size();
// NDArray::prepareSpecialUse(outArrs, {&input});
input.syncToDevice();
for (const auto a : outArrs)
a->getDataBuffer()->allocateSpecial();
if(outArrs[0]->rankOf() == 0) {
std::vector<void*> hOutBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hOutBuffers[i] = outArrs[i]->specialBuffer();
PointersManager manager(context, "helpers::unstack cuda");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
unstackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers);
manager.synchronize();
}
else {
auto xTadPack = ConstantTadHelper::getInstance()->tadForDimensions(input.shapeInfo(), ShapeUtils::evalDimsToExclude(input.rankOf(), {dim}));
auto xTadShapeInfo = xTadPack.primaryShapeInfo();
for (uint i = 0; i < numOfSubArrs; ++i) {
auto xBuff = input.specialBufferWithOffset(xTadPack.primaryOffsets()[i]);
NativeOpExecutioner::execTransformAny(input.getContext(), transform::Assign,
nullptr, xTadShapeInfo, xBuff, xTadPack.specialShapeInfo(),
nullptr, outArrs[i]->shapeInfo(), outArrs[i]->specialBuffer(), outArrs[i]->specialShapeInfo(),
nullptr, nullptr, nullptr, false/*allowParallelism*/);
}
}
// NDArray::registerSpecialUse(outArrs, {&input});
input.tickReadDevice();
for (const auto p : outArrs)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) {
BUILD_SINGLE_SELECTOR(input.dataType(), unstack_, (context, input, outArrs, dim), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void unstack_, (sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
// template <typename T>
// static __global__ void unstackCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
// const T* x = reinterpret_cast<const T*>(vx);
// __shared__ Nd4jLong xLen, totalThreads;
// __shared__ int xRank;
// if (threadIdx.x == 0) {
// xLen = shape::length(xShapeInfo);
// xRank = shape::rank(xShapeInfo);
// totalThreads = gridDim.x * blockDim.x;
// }
// __syncthreads();
// const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// Nd4jLong coords[MAX_RANK];
// for (uint64_t i = tid; i < xLen; i += totalThreads) {
// shape::index2coords(i, xShapeInfo, coords);
// const auto xOffset = shape::getOffset(xShapeInfo, coords);
// T *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis]]);
// for (uint j = axis; j < xRank - 1; ++j) // shift coords starting from axis position
// coords[j] = coords[j + 1];
// const auto zOffset = shape::getOffset(zTadShapeInfo, coords);
// z[zOffset] = x[xOffset];
// }
// }
// ///////////////////////////////////////////////////////////////////
// template<typename T>
// __host__ static void unstackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
// const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
// unstackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz, zTadShapeInfo, axis);
// }
// BUILD_SINGLE_TEMPLATE(template void unstackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES);
// ///////////////////////////////////////////////////////////////////
// void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<const NDArray*>& outArrs, const int axis) {
// const int threadsPerBlock = MAX_NUM_THREADS / 2;
// const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int numOfSubArrs = outArrs.size();
// std::vector<void*> hOutBuffers(numOfSubArrs);
// for(int i = 0; i < numOfSubArrs; ++i)
// hOutBuffers[i] = outArrs[i]->specialBuffer();
// PointersManager manager(context, "helpers::unstack");
// void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
// for(uint i = 0; i < numOfSubArrs; ++i)
// outArrs[i]->syncToDevice();
// input.syncToDevice();
// BUILD_SINGLE_SELECTOR(input.dataType(), unstackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers, outArrs[0]->specialShapeInfo(), axis), LIBND4J_TYPES);
// manager.synchronize();
// for(uint i = 0; i < numOfSubArrs; ++i)
// outArrs[i]->tickReadDevice();
// input.tickWriteDevice();
// }
// ///////////////////////////////////////////////////////////////////
// template <typename T>
// static __global__ void stackCuda(void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) {
// T* z = reinterpret_cast<T*>(vz);
// __shared__ Nd4jLong zLen, totalThreads;
// __shared__ int zRank;
// if (threadIdx.x == 0) {
// zLen = shape::length(zShapeInfo);
// zRank = shape::rank(zShapeInfo);
// totalThreads = gridDim.x * blockDim.x;
// }
// __syncthreads();
// const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// Nd4jLong coords[MAX_RANK];
// for (uint64_t i = tid; i < zLen; i += totalThreads) {
// shape::index2coords(i, zShapeInfo, coords);
// const auto zOffset = shape::getOffset(zShapeInfo, coords);
// const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[coords[axis]]);
// for (uint j = axis; j < zRank - 1; ++j) // shift coords starting from axis position
// coords[j] = coords[j + 1];
// const auto xOffset = shape::getOffset(xTadShapeInfo, coords);
// z[zOffset] = x[xOffset];
// }
// }
// ///////////////////////////////////////////////////////////////////
// template<typename T>
// __host__ static void stackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream,
// void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) {
// stackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, xTadShapeInfo, vz, zShapeInfo, axis);
// }
// BUILD_SINGLE_TEMPLATE(template void stackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis), LIBND4J_TYPES);
// ///////////////////////////////////////////////////////////////////
// void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) {
// const int threadsPerBlock = MAX_NUM_THREADS / 2;
// const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int numOfSubArrs = inArrs.size();
// std::vector<void*> hInBuffers(numOfSubArrs);
// for(int i = 0; i < numOfSubArrs; ++i)
// hInBuffers[i] = inArrs[i]->specialBuffer();
// PointersManager manager(context, "helpers::stack");
// void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
// for(uint i = 0; i < numOfSubArrs; ++i)
// inArrs[i]->syncToDevice();
// output.syncToDevice();
// BUILD_SINGLE_SELECTOR(output.dataType(), stackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, inArrs[0]->specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), axis), LIBND4J_TYPES);
// manager.synchronize();
// for(uint i = 0; i < numOfSubArrs; ++i)
// inArrs[i]->tickReadDevice();
// output.tickWriteDevice();
// }
}
}
}
| fb0926eccab1c452a53fd597fc04b479cae31164.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// Created by Yurii Shyrma on 02.01.2018
//
#include <ops/declarable/helpers/stack.h>
#include <helpers/ShapeUtils.h>
#include <array/ResultSet.h>
#include <exceptions/cuda_exception.h>
#include <helpers/TAD.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void stackScalarsCuda(void* pVx, void* vz, const Nd4jLong* zShapeInfo) {
T* z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong zLen, totalThreads;
if (threadIdx.x == 0) {
zLen = shape::length(zShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < zLen; i += totalThreads) {
const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[i]);
z[shape::getIndexOffset(i, zShapeInfo)] = *x;
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void stackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
void* pVx, void* vz, const Nd4jLong* zShapeInfo) {
stackScalarsCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void stack_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) {
const int numOfSubArrs = inArrs.size();
NDArray::prepareSpecialUse({&output}, inArrs);
if(inArrs[0]->rankOf() == 0) {
std::vector<void const*> hInBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hInBuffers[i] = inArrs[i]->specialBuffer();
PointersManager manager(context, "helpers::stack cuda");
void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
stackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, output.specialBuffer(), output.specialShapeInfo());
manager.synchronize();
}
else {
auto zTadPack = ConstantTadHelper::getInstance()->tadForDimensions(output.shapeInfo(), ShapeUtils::evalDimsToExclude(output.rankOf(), {dim}));
auto zTadShapeInfo = zTadPack.primaryShapeInfo();
for (uint i = 0; i < numOfSubArrs; ++i) {
void* zBuff = output.specialBufferWithOffset(zTadPack.primaryOffsets()[i]);
NativeOpExecutioner::execTransformAny(context, transform::Assign,
nullptr, inArrs[i]->shapeInfo(), inArrs[i]->specialBuffer(), inArrs[i]->specialShapeInfo(),
nullptr, zTadShapeInfo, zBuff, zTadPack.specialShapeInfo(),
nullptr, nullptr, nullptr, false/*allowParallelism*/);
}
}
NDArray::registerSpecialUse({&output}, inArrs);
}
////////////////////////////////////////////////////////////////////////
void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) {
BUILD_SINGLE_SELECTOR(output.dataType(), stack_, (context, inArrs, output, dim), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void stack_ , (sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void unstackScalarsCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz) {
const T* x = reinterpret_cast<const T*>(vx);
__shared__ Nd4jLong xLen, totalThreads;
if (threadIdx.x == 0) {
xLen = shape::length(xShapeInfo);
totalThreads = gridDim.x * blockDim.x;
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
for (Nd4jLong i = tid; i < xLen; i += totalThreads) {
T* z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[i]);
*z = x[shape::getIndexOffset(i, xShapeInfo)];
}
}
///////////////////////////////////////////////////////////////////
template<typename T>
__host__ static void unstackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
const void* vx, const Nd4jLong* xShapeInfo, void* pVz) {
unstackScalarsCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz);
}
///////////////////////////////////////////////////////////////////
template <typename T>
static void unstack_(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) {
const int numOfSubArrs = outArrs.size();
// NDArray::prepareSpecialUse(outArrs, {&input});
input.syncToDevice();
for (const auto a : outArrs)
a->getDataBuffer()->allocateSpecial();
if(outArrs[0]->rankOf() == 0) {
std::vector<void*> hOutBuffers(numOfSubArrs);
for(int i = 0; i < numOfSubArrs; ++i)
hOutBuffers[i] = outArrs[i]->specialBuffer();
PointersManager manager(context, "helpers::unstack cuda");
void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
const int threadsPerBlock = MAX_NUM_THREADS / 2;
const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
unstackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers);
manager.synchronize();
}
else {
auto xTadPack = ConstantTadHelper::getInstance()->tadForDimensions(input.shapeInfo(), ShapeUtils::evalDimsToExclude(input.rankOf(), {dim}));
auto xTadShapeInfo = xTadPack.primaryShapeInfo();
for (uint i = 0; i < numOfSubArrs; ++i) {
auto xBuff = input.specialBufferWithOffset(xTadPack.primaryOffsets()[i]);
NativeOpExecutioner::execTransformAny(input.getContext(), transform::Assign,
nullptr, xTadShapeInfo, xBuff, xTadPack.specialShapeInfo(),
nullptr, outArrs[i]->shapeInfo(), outArrs[i]->specialBuffer(), outArrs[i]->specialShapeInfo(),
nullptr, nullptr, nullptr, false/*allowParallelism*/);
}
}
// NDArray::registerSpecialUse(outArrs, {&input});
input.tickReadDevice();
for (const auto p : outArrs)
p->tickWriteDevice();
}
////////////////////////////////////////////////////////////////////////
void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) {
BUILD_SINGLE_SELECTOR(input.dataType(), unstack_, (context, input, outArrs, dim), LIBND4J_TYPES);
}
BUILD_SINGLE_TEMPLATE(template void unstack_, (sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim), LIBND4J_TYPES);
///////////////////////////////////////////////////////////////////
// template <typename T>
// static __global__ void unstackCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
// const T* x = reinterpret_cast<const T*>(vx);
// __shared__ Nd4jLong xLen, totalThreads;
// __shared__ int xRank;
// if (threadIdx.x == 0) {
// xLen = shape::length(xShapeInfo);
// xRank = shape::rank(xShapeInfo);
// totalThreads = gridDim.x * blockDim.x;
// }
// __syncthreads();
// const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// Nd4jLong coords[MAX_RANK];
// for (uint64_t i = tid; i < xLen; i += totalThreads) {
// shape::index2coords(i, xShapeInfo, coords);
// const auto xOffset = shape::getOffset(xShapeInfo, coords);
// T *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis]]);
//             for (uint j = axis; j < xRank - 1; ++j) // shift coords starting from axis position
// coords[j] = coords[j + 1];
// const auto zOffset = shape::getOffset(zTadShapeInfo, coords);
// z[zOffset] = x[xOffset];
// }
// }
// ///////////////////////////////////////////////////////////////////
// template<typename T>
// __host__ static void unstackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
// const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) {
// unstackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz, zTadShapeInfo, axis);
// }
// BUILD_SINGLE_TEMPLATE(template void unstackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES);
// ///////////////////////////////////////////////////////////////////
// void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<const NDArray*>& outArrs, const int axis) {
// const int threadsPerBlock = MAX_NUM_THREADS / 2;
// const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int numOfSubArrs = outArrs.size();
// std::vector<void*> hOutBuffers(numOfSubArrs);
// for(int i = 0; i < numOfSubArrs; ++i)
// hOutBuffers[i] = outArrs[i]->specialBuffer();
// PointersManager manager(context, "helpers::unstack");
// void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*));
// for(uint i = 0; i < numOfSubArrs; ++i)
// outArrs[i]->syncToDevice();
// input.syncToDevice();
// BUILD_SINGLE_SELECTOR(input.dataType(), unstackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers, outArrs[0]->specialShapeInfo(), axis), LIBND4J_TYPES);
// manager.synchronize();
// for(uint i = 0; i < numOfSubArrs; ++i)
// outArrs[i]->tickReadDevice();
// input.tickWriteDevice();
// }
// ///////////////////////////////////////////////////////////////////
// template <typename T>
// static __global__ void stackCuda(void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) {
// T* z = reinterpret_cast<T*>(vz);
// __shared__ Nd4jLong zLen, totalThreads;
// __shared__ int zRank;
// if (threadIdx.x == 0) {
// zLen = shape::length(zShapeInfo);
// zRank = shape::rank(zShapeInfo);
// totalThreads = gridDim.x * blockDim.x;
// }
// __syncthreads();
// const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
// Nd4jLong coords[MAX_RANK];
// for (uint64_t i = tid; i < zLen; i += totalThreads) {
// shape::index2coords(i, zShapeInfo, coords);
// const auto zOffset = shape::getOffset(zShapeInfo, coords);
// const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[coords[axis]]);
//             for (uint j = axis; j < zRank - 1; ++j) // shift coords starting from axis position
// coords[j] = coords[j + 1];
// const auto xOffset = shape::getOffset(xTadShapeInfo, coords);
// z[zOffset] = x[xOffset];
// }
// }
// ///////////////////////////////////////////////////////////////////
// template<typename T>
// __host__ static void stackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream,
// void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) {
// stackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, xTadShapeInfo, vz, zShapeInfo, axis);
// }
// BUILD_SINGLE_TEMPLATE(template void stackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis), LIBND4J_TYPES);
// ///////////////////////////////////////////////////////////////////
// void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) {
// const int threadsPerBlock = MAX_NUM_THREADS / 2;
// const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock;
// const int numOfSubArrs = inArrs.size();
// std::vector<void*> hInBuffers(numOfSubArrs);
// for(int i = 0; i < numOfSubArrs; ++i)
// hInBuffers[i] = inArrs[i]->specialBuffer();
// PointersManager manager(context, "helpers::stack");
// void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*));
// for(uint i = 0; i < numOfSubArrs; ++i)
// inArrs[i]->syncToDevice();
// output.syncToDevice();
// BUILD_SINGLE_SELECTOR(output.dataType(), stackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, inArrs[0]->specialShapeInfo(), output.specialBuffer(), output.specialShapeInfo(), axis), LIBND4J_TYPES);
// manager.synchronize();
// for(uint i = 0; i < numOfSubArrs; ++i)
// inArrs[i]->tickReadDevice();
// output.tickWriteDevice();
// }
}
}
}
|
e83cf44de6b9defb948f596b4bcc37a41b15e5c2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "cuda_helper.hpp"
#define Iloc(x,y) Iloc[x*n+y]
#define Jnloc(x,y) Jnloc[x*n+y]
/* Called From Device - Uses pointers stored on Global Memory only
* This function gets two pointers in two pixels of an image I
* and calculates their "neighborhood distance" squared (pixelwise distance of their respective "patchsize" neighborhoods)
* and returns the exp(-distance/filtersigma) of this distance
* Note: Each neighborhood is first pixelwise multiplied with a gaussian Kernel
*/
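/* Sketch of the weight computed below (a paraphrase of the loop, not part of the original docs):
 *   dist2 = sum over the (2R+1)x(2R+1) patch of G(i,j)^2 * (Iloc(i,j) - Jnloc(i,j))^2
 *   w     = exp(-dist2 / filtersigma)
 * where G is the Gaussian kernel centred on the patch window. */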
__device__ float patchFilt(float *Iloc,float *Jnloc,int n,float filtersigma,float *gaussianKrnl){
int offs = PATCH*PATCH/2;
float dif = 0, *krnl = gaussianKrnl+(offs); //This kernel now is aligned at the center of the kernel window (and not on the upper right)
for(int i=-R;i<=R;i++){
for(int j=-R; j<=R; j++){
float wkrnl = krnl[i*PATCH+j];
dif += wkrnl * wkrnl * (Iloc(i,j) - Jnloc(i,j)) * (Iloc(i,j) - Jnloc(i,j));
}
}
return exp(-dif/filtersigma);
}
/*
 * CUDA Kernel that performs the non-local means denoising (uses Global memory only)
* Inputs-Outputs: float *I (Image as a row major float 1D array) - Global Memory
 *                  float *Idenoised (Output denoised image stored in row major 1D format) - Global Memory
* int n (Size of the image - after the padding has taken place)
* int patchsize (defines the patch window size, typical values 3,5,7 (for respective windows [3x3],[5x5],[7x7]))
* float filtersigma (used for patchFilt(), for more info see patchFilt())
* float *gaussianKrnl (pointer to the gaussian kernel that is multiplied with each neighborhood) - Global Memory
*
*/
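/* For reference, the per-pixel estimate formed below is the usual NL-means weighted average
 * (a paraphrase of the loops, not an authoritative spec):
 *   Idenoised(x) = sum_y w(x,y) * I(y) / sum_y w(x,y),  with w(x,y) = patchFilt(I at x, I at y, ...)
 * and y running over every non-padded pixel of the image. */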
__global__ void nlmeans(float *I,
float *Idenoised,
int n,
int patchsize,
float filtersigma,
float *gaussianKrnl){
    /* Each pixel is mapped to one (block, thread) pair (a valid assumption for image dimensions used in the assignment, N = 64~256) */
int xi = threadIdx.x;
int xj = blockIdx.x ;
float Idloc = 0,z = 0;
    //If the thread refers to a pixel inside the original image (not in the padded area)
//if( (xi >= ph) && (xi < n-ph) && (xj >= ph) && (xj < n-ph) ){
if( (xi >= R) && (xi < n-R) && (xj >= R) && (xj < n-R) ){
//Calculate distances of a specific pixel with every other pixel on the (original) image
for(int yi=R;yi<n-R;yi++){
for(int yj = R; yj < n-R; yj++){
float w = patchFilt(I+(xi*n+xj), I+(yi*n+yj),n, filtersigma, gaussianKrnl );
z +=w;
Idloc += w*I[yi*n+yj];
}
}
Idenoised[xi*n+xj] = Idloc/z;
}
}
int main(int argc, char *argv[]){
float * d_I, *d_Id,*gaussianKrnl;
path = init_path(argc,argv[1]);
std::cout << "Starting building Gaussian kernel ..";
float *kernel = mymalloc<float>(PATCH*PATCH);
buildGaussKernel(kernel,PATCH,PATCH,patchSigma);
std::cout << "Finished successfully" << std::endl;
std::cout << "Starting reading input image..";
image *im = read_png_file(path); int N = im->height, Npad = N+PATCH-1;
//Add gaussian noise
float mean = 0; float std = NOISE_STD;
addNoise(im->I,mean,std,N*N);
float *I = padarrayMir(im->I,N,PATCH);
std::cout << "Finished successfully" << std::endl;
float *Id = mymalloc<float>(Npad*Npad);
printVersion(1,N,PATCH,R,path);
std::cout << "Starting running CUDA nlmeans algorithm..";
printf("Launching Kernel with BLOCKS:%d (MAX_BLOCKS=%d), THREADS:%d(MAX_THREADS=%d)\n",Npad, MAX_BLOCKS, Npad,MAX_THREADS);
{
hipMalloc( (void **)&gaussianKrnl, sizeof(float) * PATCH * PATCH );
hipMemcpy( gaussianKrnl, kernel, sizeof(float) * PATCH * PATCH, hipMemcpyHostToDevice );
hipMalloc( (void **)&d_I, sizeof(float) * Npad * Npad );
hipMalloc( (void **)&d_Id, sizeof(float) * Npad * Npad );
hipMemcpy( d_I, I, sizeof(float) * Npad * Npad, hipMemcpyHostToDevice );}
struct timespec start = tic();
hipLaunchKernelGGL(( nlmeans), dim3(Npad),dim3(Npad), 0, 0, d_I,d_Id,Npad, PATCH , filterSigma, gaussianKrnl);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
struct timespec end = toc();
hipMemcpy( Id, d_Id, sizeof(float) * Npad * Npad, hipMemcpyDeviceToHost );
std::cout << "Finished successfully" << std::endl;
std::cout << "Starting writing denoised image to the disc..";
float *IdFinal = unpad(Id,N,Npad,PATCH);
write_png(im->I,N,N,"./images/image-Noised-AWGN-GPU-Global.png");
write_png(IdFinal,N,N,"./images/image-Denoised-GPU-Global.png");
writeIm(IdFinal,"./images/txts/image-Denoised-GPU-Global.txt",N,N);
std::cout << "Finished successfully" << std::endl;
hipFree(d_Id); hipFree(d_I); hipFree(gaussianKrnl);
free(I); free(Id); free(IdFinal);
free(im->I); free(im);
storeTimes(N,start,end,GPU_GLOBAL);
std::cout << "Main is exiting successfully" << std::endl;
return 0;
}
| e83cf44de6b9defb948f596b4bcc37a41b15e5c2.cu | #include "cuda_helper.hpp"
#define Iloc(x,y) Iloc[x*n+y]
#define Jnloc(x,y) Jnloc[x*n+y]
/* Called From Device - Uses pointers stored on Global Memory only
* This function gets two pointers in two pixels of an image I
* and calculates their "neighborhood distance" squared (pixelwise distance of their respective "patchsize" neighborhoods)
* and returns the exp(-distance/filtersigma) of this distance
* Note: Each neighborhood is first pixelwise multiplied with a gaussian Kernel
*/
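/* Sketch of the weight computed below (a paraphrase of the loop, not part of the original docs):
 *   dist2 = sum over the (2R+1)x(2R+1) patch of G(i,j)^2 * (Iloc(i,j) - Jnloc(i,j))^2
 *   w     = exp(-dist2 / filtersigma)
 * where G is the Gaussian kernel centred on the patch window. */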
__device__ float patchFilt(float *Iloc,float *Jnloc,int n,float filtersigma,float *gaussianKrnl){
int offs = PATCH*PATCH/2;
float dif = 0, *krnl = gaussianKrnl+(offs); //This kernel now is aligned at the center of the kernel window (and not on the upper right)
for(int i=-R;i<=R;i++){
for(int j=-R; j<=R; j++){
float wkrnl = krnl[i*PATCH+j];
dif += wkrnl * wkrnl * (Iloc(i,j) - Jnloc(i,j)) * (Iloc(i,j) - Jnloc(i,j));
}
}
return exp(-dif/filtersigma);
}
/*
 * CUDA Kernel that performs the non-local means denoising (uses Global memory only)
* Inputs-Outputs: float *I (Image as a row major float 1D array) - Global Memory
 *                  float *Idenoised (Output denoised image stored in row major 1D format) - Global Memory
* int n (Size of the image - after the padding has taken place)
* int patchsize (defines the patch window size, typical values 3,5,7 (for respective windows [3x3],[5x5],[7x7]))
* float filtersigma (used for patchFilt(), for more info see patchFilt())
* float *gaussianKrnl (pointer to the gaussian kernel that is multiplied with each neighborhood) - Global Memory
*
*/
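/* For reference, the per-pixel estimate formed below is the usual NL-means weighted average
 * (a paraphrase of the loops, not an authoritative spec):
 *   Idenoised(x) = sum_y w(x,y) * I(y) / sum_y w(x,y),  with w(x,y) = patchFilt(I at x, I at y, ...)
 * and y running over every non-padded pixel of the image. */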
__global__ void nlmeans(float *I,
float *Idenoised,
int n,
int patchsize,
float filtersigma,
float *gaussianKrnl){
    /* Each pixel is mapped to one (block, thread) pair (a valid assumption for image dimensions used in the assignment, N = 64~256) */
int xi = threadIdx.x;
int xj = blockIdx.x ;
float Idloc = 0,z = 0;
    //If the thread refers to a pixel inside the original image (not in the padded area)
//if( (xi >= ph) && (xi < n-ph) && (xj >= ph) && (xj < n-ph) ){
if( (xi >= R) && (xi < n-R) && (xj >= R) && (xj < n-R) ){
//Calculate distances of a specific pixel with every other pixel on the (original) image
for(int yi=R;yi<n-R;yi++){
for(int yj = R; yj < n-R; yj++){
float w = patchFilt(I+(xi*n+xj), I+(yi*n+yj),n, filtersigma, gaussianKrnl );
z +=w;
Idloc += w*I[yi*n+yj];
}
}
Idenoised[xi*n+xj] = Idloc/z;
}
}
int main(int argc, char *argv[]){
float * d_I, *d_Id,*gaussianKrnl;
path = init_path(argc,argv[1]);
std::cout << "Starting building Gaussian kernel ..";
float *kernel = mymalloc<float>(PATCH*PATCH);
buildGaussKernel(kernel,PATCH,PATCH,patchSigma);
std::cout << "Finished successfully" << std::endl;
std::cout << "Starting reading input image..";
image *im = read_png_file(path); int N = im->height, Npad = N+PATCH-1;
//Add gaussian noise
float mean = 0; float std = NOISE_STD;
addNoise(im->I,mean,std,N*N);
float *I = padarrayMir(im->I,N,PATCH);
std::cout << "Finished successfully" << std::endl;
float *Id = mymalloc<float>(Npad*Npad);
printVersion(1,N,PATCH,R,path);
std::cout << "Starting running CUDA nlmeans algorithm..";
printf("Launching Kernel with BLOCKS:%d (MAX_BLOCKS=%d), THREADS:%d(MAX_THREADS=%d)\n",Npad, MAX_BLOCKS, Npad,MAX_THREADS);
{
cudaMalloc( (void **)&gaussianKrnl, sizeof(float) * PATCH * PATCH );
cudaMemcpy( gaussianKrnl, kernel, sizeof(float) * PATCH * PATCH, cudaMemcpyHostToDevice );
cudaMalloc( (void **)&d_I, sizeof(float) * Npad * Npad );
cudaMalloc( (void **)&d_Id, sizeof(float) * Npad * Npad );
cudaMemcpy( d_I, I, sizeof(float) * Npad * Npad, cudaMemcpyHostToDevice );}
struct timespec start = tic();
nlmeans<<<Npad,Npad>>>(d_I,d_Id,Npad, PATCH , filterSigma, gaussianKrnl);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
struct timespec end = toc();
cudaMemcpy( Id, d_Id, sizeof(float) * Npad * Npad, cudaMemcpyDeviceToHost );
std::cout << "Finished successfully" << std::endl;
std::cout << "Starting writing denoised image to the disc..";
float *IdFinal = unpad(Id,N,Npad,PATCH);
write_png(im->I,N,N,"./images/image-Noised-AWGN-GPU-Global.png");
write_png(IdFinal,N,N,"./images/image-Denoised-GPU-Global.png");
writeIm(IdFinal,"./images/txts/image-Denoised-GPU-Global.txt",N,N);
std::cout << "Finished successfully" << std::endl;
cudaFree(d_Id); cudaFree(d_I); cudaFree(gaussianKrnl);
free(I); free(Id); free(IdFinal);
free(im->I); free(im);
storeTimes(N,start,end,GPU_GLOBAL);
std::cout << "Main is exiting successfully" << std::endl;
return 0;
}
|
50c37c16bd9d72cf594120fa0dbc4447f3811938.hip | // !!! This is a file automatically generated by hipify!!!
//pass
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <assert.h>
#define N 2//64
__global__ void foo(int* p) {
p[threadIdx.x] = 0;
}
int main() {
int *c;
int *dev_c;
c = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i)
c[i] = rand() %10+1;
hipMalloc((void**)&dev_c, N*sizeof(int));
hipMemcpy(dev_c, c, N*sizeof(int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( foo), dim3(1), dim3(N), 0, 0, dev_c);
//ESBMC_verify_kernel(foo,1,N,dev_c);
hipMemcpy(c, dev_c, N*sizeof(int), hipMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
assert(c[i] == 0);
free(c);
hipFree(dev_c);
return 0;
}
| 50c37c16bd9d72cf594120fa0dbc4447f3811938.cu | //pass
#include <stdio.h>
#include <cuda.h>
#include <assert.h>
#define N 2//64
__global__ void foo(int* p) {
p[threadIdx.x] = 0;
}
int main() {
int *c;
int *dev_c;
c = (int*)malloc(N*sizeof(int));
for (int i = 0; i < N; ++i)
c[i] = rand() %10+1;
cudaMalloc((void**)&dev_c, N*sizeof(int));
cudaMemcpy(dev_c, c, N*sizeof(int), cudaMemcpyHostToDevice);
foo<<<1, N>>>(dev_c);
//ESBMC_verify_kernel(foo,1,N,dev_c);
cudaMemcpy(c, dev_c, N*sizeof(int), cudaMemcpyDeviceToHost);
for (int i = 0; i < N; ++i)
assert(c[i] == 0);
free(c);
cudaFree(dev_c);
return 0;
}
|
c7333c3359db2c273cd3eedf35190ba2151143bd.hip | // !!! This is a file automatically generated by hipify!!!
/*
* 3D chain_loc v 1.0
 * This rendition builds on the chain_loc code but follows the derivation in Smith et al. 2010 Nat. Methods
*/
/*
Expected input and output
[xf_all, yf_all, zf_all, N, off_all, xf_crlb, yf_crlb, zf_crlb, N_crlb, off_crlb, llv_all] = 3d_chain_loc(ilocs, zcurve, numthreads, angle)
*/
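/*
Hypothetical MATLAB call (illustrative only; the MEX name and the values used here are assumptions):
  % ilocs  : 169 x M single array, each column a 13 x 13 sub-image stored column-major
  % zcurve : 10-element single vector [sxo syo ax ay bx by dx dy gx gy]
  [xf, yf, zf, N, off, xf_c, yf_c, zf_c, N_c, off_c, llv] = chain_loc_3d(ilocs, zcurve, 128, single(0));
*/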
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define PI 3.14159265358979323846
/*
* Device code
*
* To facilitate coding (for me) I have copied the localization algorithm to be used with multiple sized areas
*/
/*
Device Functions
*/
__device__ float device_det(float fisher[25]) // updated determinant to 5 x 5 as of 2-6-18
{
float det;
det = fisher[0] * (fisher[6] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) + fisher[16] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[21] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]))) - fisher[5] * (fisher[1] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[16] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]))) + fisher[10] * (fisher[1] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[16] * (fisher[2] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[7] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[7] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) - fisher[15] * (fisher[1] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) + fisher[11] * (fisher[2] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[7] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]) - fisher[7] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]) + fisher[12] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) + fisher[20] * (fisher[1] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] 
- fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) + fisher[11] * (fisher[2] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[7] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[16] * (fisher[2] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]) - fisher[7] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]) + fisher[12] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])));
return det;
}
/*
Global Functions
*/
// localize 9 *updating for 3D 2/7/18
__global__ void localize13(float *d_iall, // pointer to our image variable
float *d_xf_all, // pointer for final x-coordinate measurement
float *d_yf_all, // pointer for final y-coordinate measurement
float *d_zf_all, // pointer for final z-coordinate measurement
float *d_N, // pointer for final N measurement
float *d_off,// pointer for final offset measurement
float *d_xf_crlb,// pointer for final x-coordinate uncertainty
float *d_yf_crlb,// pointer for final y-coordinate uncertainty
float *d_zf_crlb,// pointer for final z-coordinate uncertainty
float *d_N_crlb,// pointer for final N uncertainty
float *d_off_crlb,// pointer for final offset uncertainty
float *d_llv,// pointer for final log likelihood value calculation
float ang,// rotation of the fitting grid in radians
	float *d_zcurve,// pointer for defocusing constants in [sxo, syo, ax, ay, bx, by, dx, dy, gx, gy]
	int numi)// number of images to process
{
// Declare variables
int pix = 13; // number of pixels in the localization image
__shared__ float xgrid[169]; // allocate xpix and ypix variables to the shared memory of the blocks
__shared__ float ygrid[169]; // this will reduce calls to global device memory
// Assign defocusing constants from zcurve
__shared__ float sxo;
sxo = d_zcurve[0];
__shared__ float syo;
syo = d_zcurve[1];
__shared__ float axo;
axo = d_zcurve[2];
__shared__ float ayo;
ayo = d_zcurve[3];
__shared__ float bxo;
bxo = d_zcurve[4];
__shared__ float byo;
byo = d_zcurve[5];
__shared__ float dxo;
dxo = d_zcurve[6];
__shared__ float dyo;
dyo = d_zcurve[7];
__shared__ float gxo;
gxo = d_zcurve[8];
__shared__ float gyo;
gyo = d_zcurve[9];
// local register variables
float dudx, dudy, dudz, dudsx, dudsy, d2udx2, d2udy2, d2udz2, d2udsx2, d2udsy2, dudn, dudo, Ex, Ey, u;
float dsxdz, dsydz, d2sxdz2, d2sydz2;
float db1, db2, db3, db4, db5, db6; // variables for debugging
float d_x, d_y, d_z, d_n, d_o, dd_x, dd_y, dd_z, dd_n, dd_o, x, y, sx, sy;
// fitting parameters
float xf, yf, zf, N, b;
int tx = threadIdx.x;
int index = blockIdx.x*blockDim.x + tx; // calculate thread index
float d_i2[169]; // initialize data for image
float llv;
float fisher[25] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
float det_fish = 0.0;
// create xgrid and ygrid we want to create the grid regardless of whether the index is crunching on an image
if (tx == 0) {
for (int i = 0; i <pix; i++) { // here the grid is constructed by assigning x and y the pixel index, then rotating with a rotation transform by known angle ang
for (int j = 0; j <pix; j++) {
x = (float)j - ((float)pix - 1.0) / 2.0;
y = (float)i - ((float)pix - 1.0) / 2.0;
xgrid[j*pix + i] = x*cos(ang) - y*sin(ang);
ygrid[j*pix + i] = x*sin(ang) + y*cos(ang);
}
}
}
if (index < numi) { // check to see that threads only work if an image exists
// buffer all the variables into shared memory and registers and build guesses
xf = 0.0; // xf
yf = 0.0; // yf
db5 = 0.0;
db6 = 0.0;
N = 0.0; // N
zf = 0.0; // set z to be 0 which is close to the disk of least confusion
b = 100000; // offset
for (int i = 0; i < pix*pix; i++) {
d_i2[i] = d_iall[i + index*pix*pix]; // this buffers pixels into each d_i2, the thread index determines which image is analyzed
N += d_i2[i];
}
for(int i = 0; i < pix*pix; i++) {
xf += xgrid[i] * d_i2[i]; // sum of x and image weight
yf += ygrid[i] * d_i2[i]; // sum of y and image weight
// image sum
if (i == 1) { db5 = d_i2[i]; db6 = xf;}
if (b > d_i2[i]) { b = d_i2[i]; } // find minimum of image
}
db1 = xf;
db2 = yf;
db3 = N;
db4 = b;
xf = xf / N;
yf = yf / N;
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 1; counttry++) {
d_x = 0.0;
d_y = 0.0;
d_z = 0.0;
d_n = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_z = 0.0;
dd_n = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
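			// Model used below (my summary of the code): each sub-image is fit to an integrated 2-D Gaussian plus offset,
			//   u = N*Ex*Ey + b,  with Ex = 0.5*[erf((x - xf + 0.5)/(sqrt(2)*sx)) - erf((x - xf - 0.5)/(sqrt(2)*sx))] (Ey likewise),
			// and widths that follow the astigmatic defocus curve
			//   sx(z) = sxo*sqrt(1 + ((z-gx)/dx)^2 + ax*((z-gx)/dx)^3 + bx*((z-gx)/dx)^4).
			// The Newton updates accumulate du/d(param) and d2u/d(param)2 over the 13 x 13 window.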
for (int rowcount = 0; rowcount < pix; rowcount++) { // FOR 2 loops over all rows
for (int colcount = 0; colcount < pix; colcount++) { // FOR 3 loops over all columns
sx = sxo*powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5);
sy = syo*powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5);
// x/ygrid is col major(come from matlab) and i3 is col major
					// these three lines define the fitting gaussian for the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*pix] - xf + 0.5) / sqrt(2.0 * sx * sx)) - erf((xgrid[rowcount + colcount*pix] - xf - 0.5) / sqrt(2.0 * sx * sx)));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*pix] - yf + 0.5) / sqrt(2.0 * sy * sy)) - erf((ygrid[rowcount + colcount*pix] - yf - 0.5) / sqrt(2.0 * sy * sy)));
u = N * Ex*Ey + b;
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (N / sqrt(2.0 * PI*sx * sx))*(exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)))*Ey;
dudy = (N / sqrt(2.0 * PI*sy * sy))*(exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)))*Ex;
dudsx = (N *Ey / (sqrt(2.0*PI) * powf(sx, 2.0)))*((xgrid[rowcount + colcount*pix] - xf - 0.5) * exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)));
dudsy = (N *Ex / (sqrt(2.0*PI) * powf(sy, 2.0)))*((ygrid[rowcount + colcount*pix] - yf - 0.5) * exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)));
dudn = Ex*Ey;
dsxdz = sxo*(2 * (zf - gxo) / (dxo*dxo) + axo * 3 * powf((zf - gxo), 2) / powf(dxo, 3) + bxo * 4 * powf((zf - gxo), 3) / powf(dxo, 4)) /
(2 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5));
dsydz = syo*(2 * (zf - gyo) / (dyo*dyo) + ayo * 3 * powf((zf - gyo), 2) / powf(dyo, 3) + byo * 4 * powf((zf - gyo), 3) / powf(dyo, 4)) /
(2 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5));
dudz = dudsx*dsxdz + dudsy*dsydz;
dudo = 1.0;
// second derivatives
					// these are calculated in a similar manner to the first derivatives
d2udx2 = (N / (sqrt(2.0 * PI)*powf(sx, 3.0))*((xgrid[rowcount + colcount*pix] - xf - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx))))*Ey;
d2udy2 = (N / (sqrt(2.0 * PI)*powf(sy, 3.0))*((ygrid[rowcount + colcount*pix] - yf - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy))))*Ex;
d2udsx2 = (Ey*N / (sqrt(2.0 * PI)))
*(powf(sx, -5.0)*(powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 3)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- powf((xgrid[rowcount + colcount*pix] - xf + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)))
- 2 * powf(sx, -3.0)*((xgrid[rowcount + colcount*pix] - xf - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx))));
d2udsy2 = (Ex*N / (sqrt(2.0 * PI)))
*(powf(sy, -5.0)*(powf((ygrid[rowcount + colcount*pix] - yf - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- powf((ygrid[rowcount + colcount*pix] - yf + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)))
- 2 * powf(sy, -3.0)*((ygrid[rowcount + colcount*pix] - yf - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy))));
d2sxdz2 = sxo*(2 / powf(dxo, 2.0) + axo * 6 * (zf - gxo) / powf(dxo, 3) + bxo * 12 * powf(zf - gxo, 2.0) / powf(dxo, 4)) /
(2 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5)) -
sxo*powf(2 * (zf - gxo) / powf(dxo, 2) + axo * 3 * powf(zf - gxo, 2) / powf(dxo, 3) + bxo * 4 * powf(zf - gxo, 3) / powf(dxo, 4), 2) /
(4 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 1.5));
d2sydz2 = syo*(2 / powf(dyo, 2.0) + ayo * 6 * (zf - gyo) / powf(dyo, 3) + byo * 12 * powf(zf - gyo, 2.0) / powf(dyo, 4)) /
(2 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5)) -
syo*powf(2 * (zf - gyo) / powf(dyo, 2) + ayo * 3 * powf(zf - gyo, 2) / powf(dyo, 3) + byo * 4 * powf(zf - gyo, 3) / powf(dyo, 4), 2) /
(4 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 1.5));
d2udz2 = d2udsx2*powf(dsxdz, 2) + dudsx*d2sxdz2 + d2udsy2*powf(dsydz, 2) + dudsy*d2sydz2;
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_z = d_z + dudz*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_z = dd_z + d2udz2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudz, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*pix] / powf(u, 2.0);
if (counttry == 19) { // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudz / u;
fisher[5] += dudy*dudx / u;
fisher[6] += dudy*dudy / u;
fisher[7] += dudy*dudn / u;
fisher[8] += dudy*dudo / u;
fisher[9] += dudy*dudz / u;
fisher[10] += dudn*dudx / u; // the format has been updated but not the mathematics 2/7/18
fisher[11] += dudn*dudy / u;
fisher[12] += dudn*dudn / u;
fisher[13] += dudn*dudo / u;
fisher[14] += dudn*dudz / u;
fisher[15] += dudo*dudx / u;
fisher[16] += dudo*dudy / u;
fisher[17] += dudo*dudn / u;
fisher[18] += dudo*dudo / u;
fisher[19] += dudo*dudz / u;
fisher[20] += dudz*dudx / u;
fisher[21] += dudz*dudy / u;
fisher[22] += dudz*dudn / u;
fisher[23] += dudz*dudo / u;
fisher[24] += dudz*dudz / u;
llv += d_i2[rowcount + colcount*pix] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*pix] * log(d_i2[rowcount + colcount*pix] + 0.0000000000000001) + d_i2[rowcount + colcount*pix];
}
} // END FOR 3
} // END FOR2
// correct beta1 values with tolerances
xf = xf - d_x / dd_x;
yf = yf - d_y / dd_y;
zf = zf - d_z / dd_z;
N = N - d_n / dd_n;
b = b - d_o / dd_o;
} // end FOR 1
if (xf == xf && yf == yf && zf == zf && N == N && b == b && sx == sx && sy == sy && b == b) { // begin is numeric if statement
if (N > 0 && xf >= -3 && xf <= 3 && yf <= 3 && yf >= -3) { // was the molecule inside the image? Was N positive? if yes then record the point
// Proper Reporting Section
/*
d_xf_all[index] = xf; // correct position for x
d_yf_all[index] = yf; // correct position for y
d_zf_all[index] = zf;
d_N[index] = N;
d_off[index] = b;
d_llv[index] = llv;
*/
				// Debugging Section: comment out these assignments and re-enable the Proper Reporting Section above for regular operation
d_xf_all[index] = db1; // correct position for x
d_yf_all[index] = db2; // correct position for y
d_zf_all[index] = db3;
d_N[index] = db4;
d_off[index] = db5;
d_llv[index] = db6;
// calculate crlb's for estimators
// updated for zf
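				// Note: each CRLB below is the corresponding diagonal element of the inverse Fisher matrix,
				// computed as cofactor / determinant of the 5 x 5 Fisher matrix rather than by forming the full inverse.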
				det_fish = device_det(fisher);  // these values were determined using a homemade Python code called cofacs.py and text_det.py and checking against lower rank matrices
d_xf_crlb[index] = (fisher[6] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) + fisher[16] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[21] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]))) / det_fish;
d_yf_crlb[index] = -(fisher[0] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[10] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[15] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) - fisher[20] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]))) / det_fish;
d_N_crlb[index] = +(fisher[0] * (fisher[6] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[16] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[21] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) - fisher[5] * (fisher[1] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[16] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[21] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[15] * (fisher[1] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[6] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[21] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[20] * (fisher[1] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[6] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[16] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) / det_fish;
d_off_crlb[index] = -(fisher[0] * (fisher[6] * (fisher[12] * fisher[24] - fisher[22] * fisher[14]) - fisher[11] * (fisher[7] * fisher[24] - fisher[22] * fisher[9]) + fisher[21] * (fisher[7] * fisher[14] - fisher[12] * fisher[9])) - fisher[5] * (fisher[1] * (fisher[12] * fisher[24] - fisher[22] * fisher[14]) - fisher[11] * (fisher[2] * fisher[24] - fisher[22] * fisher[4]) + fisher[21] * (fisher[2] * fisher[14] - fisher[12] * fisher[4])) + fisher[10] * (fisher[1] * (fisher[7] * fisher[24] - fisher[22] * fisher[9]) - fisher[6] * (fisher[2] * fisher[24] - fisher[22] * fisher[4]) + fisher[21] * (fisher[2] * fisher[9] - fisher[7] * fisher[4])) - fisher[20] * (fisher[1] * (fisher[7] * fisher[14] - fisher[12] * fisher[9]) - fisher[6] * (fisher[2] * fisher[14] - fisher[12] * fisher[4]) + fisher[11] * (fisher[2] * fisher[9] - fisher[7] * fisher[4]))) / det_fish;
d_zf_crlb[index] = +(fisher[0] * (fisher[6] * (fisher[12] * fisher[18] - fisher[17] * fisher[13]) - fisher[11] * (fisher[7] * fisher[18] - fisher[17] * fisher[8]) + fisher[16] * (fisher[7] * fisher[13] - fisher[12] * fisher[8])) - fisher[5] * (fisher[1] * (fisher[12] * fisher[18] - fisher[17] * fisher[13]) - fisher[11] * (fisher[2] * fisher[18] - fisher[17] * fisher[3]) + fisher[16] * (fisher[2] * fisher[13] - fisher[12] * fisher[3])) + fisher[10] * (fisher[1] * (fisher[7] * fisher[18] - fisher[17] * fisher[8]) - fisher[6] * (fisher[2] * fisher[18] - fisher[17] * fisher[3]) + fisher[16] * (fisher[2] * fisher[8] - fisher[7] * fisher[3])) - fisher[15] * (fisher[1] * (fisher[7] * fisher[13] - fisher[12] * fisher[8]) - fisher[6] * (fisher[2] * fisher[13] - fisher[12] * fisher[3]) + fisher[11] * (fisher[2] * fisher[8] - fisher[7] * fisher[3]))) / det_fish;
}
else { // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_zf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_zf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_llv[index] = llv;
}
} //end is numeric if statement
else {
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_zf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_zf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_llv[index] = llv;
} // end else fail statement
}
} // end localize 9
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
float *iall; // the pointer to the array of all images to be analyzed
float *d_iall; // Pointer to image array on gpu
float *zcurve;
float *d_zcurve;
float angle;
float *d_xf_all;
float *d_yf_all;
float *d_zf_all;
float *d_N;
float *d_off;
float *d_llv;
float *d_xf_crlb;
float *d_yf_crlb;
float *d_zf_crlb;
float *d_N_crlb;
float *d_off_crlb;
float *xf, *xfc, *yf, *yfc, *n, *nc, *zf, *zfc, *off, *offc, *llv;
size_t threadsperblock;
	int irow;               // number of pixels in a row which should also be the number in a column
int numi; // number of images imported
const size_t *idims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 4) {
printf("Must have 4 inputs ( i1, numthreads, angle(in rads), defocusing constants)\n");
mexErrMsgTxt("See Error above!\n");
}
// Error statement to handle ilocs being type single
if (!mxIsSingle(prhs[0]) || mxIsComplex(prhs[0])) {
printf("i1 must be a nxm float array\n");
mexErrMsgTxt("See Error above!\n");
}
// Error statement if angle is not type single
if (!mxIsSingle(prhs[3]) || mxIsComplex(prhs[3])) {
printf("angle must be a single\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsSingle(prhs[1]) || mxIsComplex(prhs[1])) {
printf("defocus constants must be of type single\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
	iall = (float *)mxGetPr(prhs[0]);       // MATLAB linearizes in a column-major format which affects indexing (Writing MATLAB C/MEX Code - Research Gate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
irow = (int)idims[0];
numi = (int)idims[1];
angle = (float)mxGetScalar(prhs[3]);
zcurve = (float *)mxGetPr(prhs[1]);
if (numi > 1000000 || numi < 1) {
numi = 1;
}
int imem = irow*numi * sizeof(float);
int vmem = numi * sizeof(float);
// verify that the input variables are what was expected
// check that iloc is a *perfect square* by num_mol array
if (irow != 169) {
printf("Images are of incorrect size. There must be a perfect square number of rows in the entry.\n");
mexErrMsgTxt("See Error above!\n");
}
if (nlhs != 11) {
printf("You must have 11 output variables [xf_all, yf_all, zf_all, N, off_all, xf_crlb, yf_crlb, zf_crlb, N_crlb, off_crlb, llv_all]\n");
mexErrMsgTxt("See Error above!\n");
}
// allocate memory and copy it onto the gpu device
// iall
checkCudaErrors(hipMalloc((void**)&d_iall, imem)); // allocate image memory
checkCudaErrors(hipMalloc((void**)&d_zcurve, sizeof(float) * 10)); // allocate image memory
checkCudaErrors(hipMemcpy(d_iall, iall, imem, hipMemcpyHostToDevice)); // copy images from device to host
checkCudaErrors(hipMemcpy(d_zcurve, zcurve, sizeof(float) * 10, hipMemcpyHostToDevice));
// allocate memory for fitted variables that will be returned from device
checkCudaErrors(hipMalloc((void**)&d_xf_all, vmem)); // allocate xf_all memory
checkCudaErrors(hipMalloc((void**)&d_xf_crlb, vmem)); // allocate xf_crlb memory
checkCudaErrors(hipMalloc((void**)&d_yf_all, vmem)); // allocate yf_all memory
checkCudaErrors(hipMalloc((void**)&d_yf_crlb, vmem)); // allocate yf_crlb memory
checkCudaErrors(hipMalloc((void**)&d_zf_all, vmem)); // allocate zf memory
checkCudaErrors(hipMalloc((void**)&d_zf_crlb, vmem)); // allocate zf_crlb memory
checkCudaErrors(hipMalloc((void**)&d_N, vmem)); // allocate N memory
checkCudaErrors(hipMalloc((void**)&d_N_crlb, vmem)); // allocate N_crlb memory
checkCudaErrors(hipMalloc((void**)&d_off, vmem)); // allocate off memory
checkCudaErrors(hipMalloc((void**)&d_off_crlb, vmem)); // allocate N memory
checkCudaErrors(hipMalloc((void**)&d_llv, vmem)); // allocate llv memory
/* Run GPU kernel*/
threadsperblock = mxGetScalar(prhs[2]); // get number of threads perblock from matlab
localize13 << <((numi - 1) / threadsperblock + 1), threadsperblock >> >(d_iall, d_xf_all, d_yf_all, d_zf_all, d_N, d_off, d_xf_crlb, d_yf_crlb, d_zf_crlb, d_N_crlb, d_off_crlb, d_llv, angle, d_zcurve, numi);
// Allocate host side memory for output arrays at the output pointer positions
plhs[0] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[2] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[3] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[4] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[5] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[6] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
plhs[7] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[8] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[9] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[10] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
// Copy pointers from mex array
xf = (float *)mxGetPr(plhs[0]);
xfc = (float *)mxGetPr(plhs[1]);
yf = (float *)mxGetPr(plhs[2]);
yfc = (float *)mxGetPr(plhs[3]);
zf = (float *)mxGetPr(plhs[4]);
zfc = (float *)mxGetPr(plhs[5]);
n = (float *)mxGetPr(plhs[6]);
nc = (float *)mxGetPr(plhs[7]);
off = (float *)mxGetPr(plhs[8]);
offc = (float *)mxGetPr(plhs[9]);
llv = (float *)mxGetPr(plhs[10]); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
// copy memory from device to host memory at mex array pointers
checkCudaErrors(hipMemcpy(xf, d_xf_all, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(xfc, d_xf_crlb, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(yf, d_yf_all, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(yfc, d_yf_crlb, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(zf, d_zf_all, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(zfc, d_zf_crlb, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(n, d_N, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(nc, d_N_crlb, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(off, d_off, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(offc, d_off_crlb, vmem, hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(llv, d_llv, vmem, hipMemcpyDeviceToHost)); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
// clean up
hipFree(d_iall);
hipFree(d_N);
hipFree(d_xf_all);
hipFree(d_yf_all);
hipFree(d_zf_all);
hipFree(d_off);
hipFree(d_xf_crlb);
hipFree(d_yf_crlb);
hipFree(d_zf_crlb);
hipFree(d_N_crlb);
hipFree(d_off_crlb);
hipFree(d_llv);
hipFree(d_zcurve);
} // DONE
| c7333c3359db2c273cd3eedf35190ba2151143bd.cu | /*
* 3D chain_loc v 1.0
 * This rendition builds on the chain_loc code but follows the derivation in Smith et al. 2010 Nat. Methods
*/
/*
Expected input and output
[xf_all, yf_all, zf_all, N, off_all, xf_crlb, yf_crlb, zf_crlb, N_crlb, off_crlb, llv_all] = 3d_chain_loc(ilocs, zcurve, numthreads, angle)
*/
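/*
Hypothetical MATLAB call (illustrative only; the MEX name and the values used here are assumptions):
  % ilocs  : 169 x M single array, each column a 13 x 13 sub-image stored column-major
  % zcurve : 10-element single vector [sxo syo ax ay bx by dx dy gx gy]
  [xf, yf, zf, N, off, xf_c, yf_c, zf_c, N_c, off_c, llv] = chain_loc_3d(ilocs, zcurve, 128, single(0));
*/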
#include <mex.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
// includes, project
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define PI 3.14159265358979323846
/*
* Device code
*
* To facilitate coding (for me) I have copied the localization algorithm to be used with multiple sized areas
*/
/*
Device Functions
*/
__device__ float device_det(float fisher[25]) // updated determinant to 5 x 5 as of 2-6-18
{
float det;
det = fisher[0] * (fisher[6] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) + fisher[16] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[21] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]))) - fisher[5] * (fisher[1] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[16] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]))) + fisher[10] * (fisher[1] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[16] * (fisher[2] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[7] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[7] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) - fisher[15] * (fisher[1] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) + fisher[11] * (fisher[2] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[7] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[21] * (fisher[2] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]) - fisher[7] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]) + fisher[12] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) + fisher[20] * (fisher[1] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] 
- fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[6] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) + fisher[11] * (fisher[2] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[7] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[16] * (fisher[2] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]) - fisher[7] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]) + fisher[12] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])));
return det;
}
/*
Global Functions
*/
// localize 9 *updating for 3D 2/7/18
__global__ void localize13(float *d_iall, // pointer to our image variable
float *d_xf_all, // pointer for final x-coordinate measurement
float *d_yf_all, // pointer for final y-coordinate measurement
float *d_zf_all, // pointer for final z-coordinate measurement
float *d_N, // pointer for final N measurement
float *d_off,// pointer for final offset measurement
float *d_xf_crlb,// pointer for final x-coordinate uncertainty
float *d_yf_crlb,// pointer for final y-coordinate uncertainty
float *d_zf_crlb,// pointer for final z-coordinate uncertainty
float *d_N_crlb,// pointer for final N uncertainty
float *d_off_crlb,// pointer for final offset uncertainty
float *d_llv,// pointer for final log likelihood value calculation
float ang,// rotation of the fitting grid in radians
	float *d_zcurve,// pointer for defocusing constants in [sxo, syo, ax, ay, bx, by, dx, dy, gx, gy]
	int numi)// number of images to process
{
// Declare variables
int pix = 13; // number of pixels in the localization image
__shared__ float xgrid[169]; // allocate xpix and ypix variables to the shared memory of the blocks
__shared__ float ygrid[169]; // this will reduce calls to global device memory
// Assign defocusing constants from zcurve
__shared__ float sxo;
sxo = d_zcurve[0];
__shared__ float syo;
syo = d_zcurve[1];
__shared__ float axo;
axo = d_zcurve[2];
__shared__ float ayo;
ayo = d_zcurve[3];
__shared__ float bxo;
bxo = d_zcurve[4];
__shared__ float byo;
byo = d_zcurve[5];
__shared__ float dxo;
dxo = d_zcurve[6];
__shared__ float dyo;
dyo = d_zcurve[7];
__shared__ float gxo;
gxo = d_zcurve[8];
__shared__ float gyo;
gyo = d_zcurve[9];
// local register variables
float dudx, dudy, dudz, dudsx, dudsy, d2udx2, d2udy2, d2udz2, d2udsx2, d2udsy2, dudn, dudo, Ex, Ey, u;
float dsxdz, dsydz, d2sxdz2, d2sydz2;
float db1, db2, db3, db4, db5, db6; // variables for debugging
float d_x, d_y, d_z, d_n, d_o, dd_x, dd_y, dd_z, dd_n, dd_o, x, y, sx, sy;
// fitting parameters
float xf, yf, zf, N, b;
int tx = threadIdx.x;
int index = blockIdx.x*blockDim.x + tx; // calculate thread index
float d_i2[169]; // initialize data for image
float llv;
float fisher[25] = { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 };
float det_fish = 0.0;
// create xgrid and ygrid we want to create the grid regardless of whether the index is crunching on an image
if (tx == 0) {
for (int i = 0; i <pix; i++) { // here the grid is constructed by assigning x and y the pixel index, then rotating with a rotation transform by known angle ang
for (int j = 0; j <pix; j++) {
x = (float)j - ((float)pix - 1.0) / 2.0;
y = (float)i - ((float)pix - 1.0) / 2.0;
xgrid[j*pix + i] = x*cos(ang) - y*sin(ang);
ygrid[j*pix + i] = x*sin(ang) + y*cos(ang);
}
}
}
if (index < numi) { // check to see that threads only work if an image exists
// buffer all the variables into shared memory and registers and build guesses
xf = 0.0; // xf
yf = 0.0; // yf
db5 = 0.0;
db6 = 0.0;
N = 0.0; // N
zf = 0.0; // set z to be 0 which is close to the disk of least confusion
b = 100000; // offset
for (int i = 0; i < pix*pix; i++) {
d_i2[i] = d_iall[i + index*pix*pix]; // this buffers pixels into each d_i2, the thread index determines which image is analyzed
N += d_i2[i];
}
for(int i = 0; i < pix*pix; i++) {
xf += xgrid[i] * d_i2[i]; // sum of x and image weight
yf += ygrid[i] * d_i2[i]; // sum of y and image weight
// image sum
if (i == 1) { db5 = d_i2[i]; db6 = xf;}
if (b > d_i2[i]) { b = d_i2[i]; } // find minimum of image
}
db1 = xf;
db2 = yf;
db3 = N;
db4 = b;
xf = xf / N;
yf = yf / N;
// start the for loop iterations FOR 1
for (int counttry = 0; counttry < 1; counttry++) {
d_x = 0.0;
d_y = 0.0;
d_z = 0.0;
d_n = 0.0;
d_o = 0.0;
dd_x = 0.0; //wipe incremental variables each loop to give correct correction factor
dd_y = 0.0;
dd_z = 0.0;
dd_n = 0.0;
dd_o = 0.0;
u = 0;
Ey = 0;
Ex = 0;
llv = 0.0;
// Calculate pixel values for derivatives, 2nd derivatives, errorfunctions and u
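			// Model used below (my summary of the code): each sub-image is fit to an integrated 2-D Gaussian plus offset,
			//   u = N*Ex*Ey + b,  with Ex = 0.5*[erf((x - xf + 0.5)/(sqrt(2)*sx)) - erf((x - xf - 0.5)/(sqrt(2)*sx))] (Ey likewise),
			// and widths that follow the astigmatic defocus curve
			//   sx(z) = sxo*sqrt(1 + ((z-gx)/dx)^2 + ax*((z-gx)/dx)^3 + bx*((z-gx)/dx)^4).
			// The Newton updates accumulate du/d(param) and d2u/d(param)2 over the 13 x 13 window.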
for (int rowcount = 0; rowcount < pix; rowcount++) { // FOR 2 loops over all rows
for (int colcount = 0; colcount < pix; colcount++) { // FOR 3 loops over all columns
sx = sxo*powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5);
sy = syo*powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5);
// x/ygrid is col major(come from matlab) and i3 is col major
					// these three lines define the fitting gaussian for the current iteration of parameters
Ex = 0.5 * (erf((xgrid[rowcount + colcount*pix] - xf + 0.5) / sqrt(2.0 * sx * sx)) - erf((xgrid[rowcount + colcount*pix] - xf - 0.5) / sqrt(2.0 * sx * sx)));
Ey = 0.5 * (erf((ygrid[rowcount + colcount*pix] - yf + 0.5) / sqrt(2.0 * sy * sy)) - erf((ygrid[rowcount + colcount*pix] - yf - 0.5) / sqrt(2.0 * sy * sy)));
u = N * Ex*Ey + b;
// first derivatives calculations
// these are done pixel by pixel with the sum added up in the d_x and dd_x areas
dudx = (N / sqrt(2.0 * PI*sx * sx))*(exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)))*Ey;
dudy = (N / sqrt(2.0 * PI*sy * sy))*(exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)))*Ex;
dudsx = (N *Ey / (sqrt(2.0*PI) * powf(sx, 2.0)))*((xgrid[rowcount + colcount*pix] - xf - 0.5) * exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)));
dudsy = (N *Ex / (sqrt(2.0*PI) * powf(sy, 2.0)))*((ygrid[rowcount + colcount*pix] - yf - 0.5) * exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)));
dudn = Ex*Ey;
dsxdz = sxo*(2 * (zf - gxo) / (dxo*dxo) + axo * 3 * powf((zf - gxo), 2) / powf(dxo, 3) + bxo * 4 * powf((zf - gxo), 3) / powf(dxo, 4)) /
(2 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5));
dsydz = syo*(2 * (zf - gyo) / (dyo*dyo) + ayo * 3 * powf((zf - gyo), 2) / powf(dyo, 3) + byo * 4 * powf((zf - gyo), 3) / powf(dyo, 4)) /
(2 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5));
dudz = dudsx*dsxdz + dudsy*dsydz;
dudo = 1.0;
// second derivatives
// these are calculated in a similar manner to the first derivatives
d2udx2 = (N / (sqrt(2.0 * PI)*powf(sx, 3.0))*((xgrid[rowcount + colcount*pix] - xf - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx))))*Ey;
d2udy2 = (N / (sqrt(2.0 * PI)*powf(sy, 3.0))*((ygrid[rowcount + colcount*pix] - yf - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy))))*Ex;
d2udsx2 = (Ey*N / (sqrt(2.0 * PI)))
*(powf(sx, -5.0)*(powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 3)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- powf((xgrid[rowcount + colcount*pix] - xf + 0.5), 3)*exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx)))
- 2 * powf(sx, -3.0)*((xgrid[rowcount + colcount*pix] - xf - 0.5)*exp(-powf(xgrid[rowcount + colcount*pix] - xf - 0.5, 2.0) / (2.0 * sx * sx))
- (xgrid[rowcount + colcount*pix] - xf + 0.5) *exp(-powf(xgrid[rowcount + colcount*pix] - xf + 0.5, 2.0) / (2.0 * sx * sx))));
d2udsy2 = (Ex*N / (sqrt(2.0 * PI)))
*(powf(sy, -5.0)*(powf((ygrid[rowcount + colcount*pix] - yf - 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- powf((ygrid[rowcount + colcount*pix] - yf + 0.5), 3)*exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy)))
- 2 * powf(sy, -3.0)*((ygrid[rowcount + colcount*pix] - yf - 0.5)*exp(-powf(ygrid[rowcount + colcount*pix] - yf - 0.5, 2.0) / (2.0 * sy * sy))
- (ygrid[rowcount + colcount*pix] - yf + 0.5) *exp(-powf(ygrid[rowcount + colcount*pix] - yf + 0.5, 2.0) / (2.0 * sy * sy))));
d2sxdz2 = sxo*(2 / powf(dxo, 2.0) + axo * 6 * (zf - gxo) / powf(dxo, 3) + bxo * 12 * powf(zf - gxo, 2.0) / powf(dxo, 4)) /
(2 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 0.5)) -
sxo*powf(2 * (zf - gxo) / powf(dxo, 2) + axo * 3 * powf(zf - gxo, 2) / powf(dxo, 3) + bxo * 4 * powf(zf - gxo, 3) / powf(dxo, 4), 2) /
(4 * powf(1 + powf((zf - gxo) / dxo, 2.0) + axo*powf((zf - gxo) / dxo, 3.0) + bxo*powf((zf - gxo) / dxo, 4.0), 1.5));
d2sydz2 = syo*(2 / powf(dyo, 2.0) + ayo * 6 * (zf - gyo) / powf(dyo, 3) + byo * 12 * powf(zf - gyo, 2.0) / powf(dyo, 4)) /
(2 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 0.5)) -
syo*powf(2 * (zf - gyo) / powf(dyo, 2) + ayo * 3 * powf(zf - gyo, 2) / powf(dyo, 3) + byo * 4 * powf(zf - gyo, 3) / powf(dyo, 4), 2) /
(4 * powf(1 + powf((zf - gyo) / dyo, 2.0) + ayo*powf((zf - gyo) / dyo, 3.0) + byo*powf((zf - gyo) / dyo, 4.0), 1.5));
d2udz2 = d2udsx2*powf(dsxdz, 2) + dudsx*d2sxdz2 + d2udsy2*powf(dsydz, 2) + dudsy*d2sydz2;
// summing variable to lead to correction factors
// these variables keep track of the correction which is given by summing over the entire pixel
d_x = d_x + dudx*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_x = dd_x + d2udx2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudx, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_y = d_y + dudy*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_y = dd_y + d2udy2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudy, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_z = d_z + dudz*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_z = dd_z + d2udz2*((d_i2[rowcount + colcount*pix] / u) - 1.0) - powf(dudz, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2.0);
d_n = d_n + dudn*((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_n = dd_n - powf(dudn, 2.0) * d_i2[rowcount + colcount*pix] / powf(u, 2);
d_o = d_o + ((d_i2[rowcount + colcount*pix] / u) - 1.0);
dd_o = dd_o - d_i2[rowcount + colcount*pix] / powf(u, 2.0);
if (counttry == 19) { // on the last count, construct fisher information matrix elements
fisher[0] += dudx*dudx / u;
fisher[1] += dudx*dudy / u;
fisher[2] += dudx*dudn / u;
fisher[3] += dudx*dudo / u;
fisher[4] += dudx*dudz / u;
fisher[5] += dudy*dudx / u;
fisher[6] += dudy*dudy / u;
fisher[7] += dudy*dudn / u;
fisher[8] += dudy*dudo / u;
fisher[9] += dudy*dudz / u;
fisher[10] += dudn*dudx / u; // the format has been updated but not the mathematics 2/7/18
fisher[11] += dudn*dudy / u;
fisher[12] += dudn*dudn / u;
fisher[13] += dudn*dudo / u;
fisher[14] += dudn*dudz / u;
fisher[15] += dudo*dudx / u;
fisher[16] += dudo*dudy / u;
fisher[17] += dudo*dudn / u;
fisher[18] += dudo*dudo / u;
fisher[19] += dudo*dudz / u;
fisher[20] += dudz*dudx / u;
fisher[21] += dudz*dudy / u;
fisher[22] += dudz*dudn / u;
fisher[23] += dudz*dudo / u;
fisher[24] += dudz*dudz / u;
llv += d_i2[rowcount + colcount*pix] * log(u + 0.0000000000000001) - u - d_i2[rowcount + colcount*pix] * log(d_i2[rowcount + colcount*pix] + 0.0000000000000001) + d_i2[rowcount + colcount*pix];
}
} // END FOR 3
} // END FOR2
// correct the beta values with a Newton-Raphson step: parameter -= (first derivative) / (second derivative)
xf = xf - d_x / dd_x;
yf = yf - d_y / dd_y;
zf = zf - d_z / dd_z;
N = N - d_n / dd_n;
b = b - d_o / dd_o;
} // end FOR 1
if (xf == xf && yf == yf && zf == zf && N == N && b == b && sx == sx && sy == sy && b == b) { // begin is numeric if statement
if (N > 0 && xf >= -3 && xf <= 3 && yf <= 3 && yf >= -3) { // was the molecule inside the image? Was N positive? if yes then record the point
// Proper Reporting Section
/*
d_xf_all[index] = xf; // correct position for x
d_yf_all[index] = yf; // correct position for y
d_zf_all[index] = zf;
d_N[index] = N;
d_off[index] = b;
d_llv[index] = llv;
*/
// Debugging Section Keep Commented for regular operation
d_xf_all[index] = db1; // correct position for x
d_yf_all[index] = db2; // correct position for y
d_zf_all[index] = db3;
d_N[index] = db4;
d_off[index] = db5;
d_llv[index] = db6;
// calculate crlb's for estimators
// updated for zf
det_fish = device_det(fisher); // these values were determined using a homemade Python code called cofacs.py and text_det.py and checking against lower rank matrices
d_xf_crlb[index] = (fisher[6] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[11] * (fisher[7] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) + fisher[16] * (fisher[7] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[22] * (fisher[8] * fisher[14] - fisher[13] * fisher[9])) - fisher[21] * (fisher[7] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) + fisher[17] * (fisher[8] * fisher[14] - fisher[13] * fisher[9]))) / det_fish;
d_yf_crlb[index] = -(fisher[0] * (fisher[12] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) + fisher[22] * (fisher[13] * fisher[19] - fisher[18] * fisher[14])) - fisher[10] * (fisher[2] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[17] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[15] * (fisher[2] * (fisher[13] * fisher[24] - fisher[23] * fisher[14]) - fisher[12] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[22] * (fisher[3] * fisher[14] - fisher[13] * fisher[4])) - fisher[20] * (fisher[2] * (fisher[13] * fisher[19] - fisher[18] * fisher[14]) - fisher[12] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[17] * (fisher[3] * fisher[14] - fisher[13] * fisher[4]))) / det_fish;
d_N_crlb[index] = +(fisher[0] * (fisher[6] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[16] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) + fisher[21] * (fisher[8] * fisher[19] - fisher[18] * fisher[9])) - fisher[5] * (fisher[1] * (fisher[18] * fisher[24] - fisher[23] * fisher[19]) - fisher[16] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[21] * (fisher[3] * fisher[19] - fisher[18] * fisher[4])) + fisher[15] * (fisher[1] * (fisher[8] * fisher[24] - fisher[23] * fisher[9]) - fisher[6] * (fisher[3] * fisher[24] - fisher[23] * fisher[4]) + fisher[21] * (fisher[3] * fisher[9] - fisher[8] * fisher[4])) - fisher[20] * (fisher[1] * (fisher[8] * fisher[19] - fisher[18] * fisher[9]) - fisher[6] * (fisher[3] * fisher[19] - fisher[18] * fisher[4]) + fisher[16] * (fisher[3] * fisher[9] - fisher[8] * fisher[4]))) / det_fish;
d_off_crlb[index] = -(fisher[0] * (fisher[6] * (fisher[12] * fisher[24] - fisher[22] * fisher[14]) - fisher[11] * (fisher[7] * fisher[24] - fisher[22] * fisher[9]) + fisher[21] * (fisher[7] * fisher[14] - fisher[12] * fisher[9])) - fisher[5] * (fisher[1] * (fisher[12] * fisher[24] - fisher[22] * fisher[14]) - fisher[11] * (fisher[2] * fisher[24] - fisher[22] * fisher[4]) + fisher[21] * (fisher[2] * fisher[14] - fisher[12] * fisher[4])) + fisher[10] * (fisher[1] * (fisher[7] * fisher[24] - fisher[22] * fisher[9]) - fisher[6] * (fisher[2] * fisher[24] - fisher[22] * fisher[4]) + fisher[21] * (fisher[2] * fisher[9] - fisher[7] * fisher[4])) - fisher[20] * (fisher[1] * (fisher[7] * fisher[14] - fisher[12] * fisher[9]) - fisher[6] * (fisher[2] * fisher[14] - fisher[12] * fisher[4]) + fisher[11] * (fisher[2] * fisher[9] - fisher[7] * fisher[4]))) / det_fish;
d_zf_crlb[index] = +(fisher[0] * (fisher[6] * (fisher[12] * fisher[18] - fisher[17] * fisher[13]) - fisher[11] * (fisher[7] * fisher[18] - fisher[17] * fisher[8]) + fisher[16] * (fisher[7] * fisher[13] - fisher[12] * fisher[8])) - fisher[5] * (fisher[1] * (fisher[12] * fisher[18] - fisher[17] * fisher[13]) - fisher[11] * (fisher[2] * fisher[18] - fisher[17] * fisher[3]) + fisher[16] * (fisher[2] * fisher[13] - fisher[12] * fisher[3])) + fisher[10] * (fisher[1] * (fisher[7] * fisher[18] - fisher[17] * fisher[8]) - fisher[6] * (fisher[2] * fisher[18] - fisher[17] * fisher[3]) + fisher[16] * (fisher[2] * fisher[8] - fisher[7] * fisher[3])) - fisher[15] * (fisher[1] * (fisher[7] * fisher[13] - fisher[12] * fisher[8]) - fisher[6] * (fisher[2] * fisher[13] - fisher[12] * fisher[3]) + fisher[11] * (fisher[2] * fisher[8] - fisher[7] * fisher[3]))) / det_fish;
}
else { // if localization failed set all parameters to -1. These can easily be identified by molecules with framenum_all -1
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_zf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_zf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_llv[index] = llv;
}
} //end is numeric if statement
else {
d_xf_all[index] = -1;
d_yf_all[index] = -1;
d_zf_all[index] = -1;
d_N[index] = -1;
d_off[index] = -1;
d_xf_crlb[index] = -1;
d_yf_crlb[index] = -1;
d_zf_crlb[index] = -1;
d_N_crlb[index] = -1;
d_off_crlb[index] = -1;
d_llv[index] = llv;
} // end else fail statement
}
} // end localize13
/*
* Host code
*
*
*/
void mexFunction(int nlhs, mxArray *plhs[],
int nrhs, mxArray const *prhs[])
{
/* Declare all variables.*/
float *iall; // the pointer to the array of all images to be analyzed
float *d_iall; // Pointer to image array on gpu
float *zcurve;
float *d_zcurve;
float angle;
float *d_xf_all;
float *d_yf_all;
float *d_zf_all;
float *d_N;
float *d_off;
float *d_llv;
float *d_xf_crlb;
float *d_yf_crlb;
float *d_zf_crlb;
float *d_N_crlb;
float *d_off_crlb;
float *xf, *xfc, *yf, *yfc, *n, *nc, *zf, *zfc, *off, *offc, *llv;
size_t threadsperblock;
int irow; // number of pixels in a row, which should also be the number in a column
int numi; // number of images imported
const size_t *idims;
/* Throw an error if the input does not match expectations. */
if (nrhs != 4) {
printf("Must have 4 inputs (i1, defocusing constants, numthreads, angle (in rads))\n");
mexErrMsgTxt("See Error above!\n");
}
// Error statement to handle ilocs being type single
if (!mxIsSingle(prhs[0]) || mxIsComplex(prhs[0])) {
printf("i1 must be a nxm float array\n");
mexErrMsgTxt("See Error above!\n");
}
// Error statement if angle is not type single
if (!mxIsSingle(prhs[3]) || mxIsComplex(prhs[3])) {
printf("angle must be a single\n");
mexErrMsgTxt("See Error above!\n");
}
if (!mxIsSingle(prhs[1]) || mxIsComplex(prhs[1])) {
printf("defocus constants must be of type single\n");
mexErrMsgTxt("See Error above!\n");
}
// get pointer to input arguments
iall = (float *)mxGetPr(prhs[0]); // matlab linearizes in a column major format which affects indexing (Writing Matlab C/MEX Code - Research Gate)
idims = mxGetDimensions(prhs[0]); // get dimensions of image array
irow = (int)idims[0];
numi = (int)idims[1];
angle = (float)mxGetScalar(prhs[3]);
zcurve = (float *)mxGetPr(prhs[1]);
if (numi > 1000000 || numi < 1) {
numi = 1;
}
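// byte counts: imem covers the whole image stack (irow floats per image), vmem one per-image output vector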
int imem = irow*numi * sizeof(float);
int vmem = numi * sizeof(float);
// verify that the input variables are what was expected
// check that iloc is a *perfect square* by num_mol array
if (irow != 169) {
printf("Images are of incorrect size. Each column must hold one 13x13 image (169 rows).\n");
mexErrMsgTxt("See Error above!\n");
}
if (nlhs != 11) {
printf("You must have 11 output variables [xf_all, yf_all, zf_all, N, off_all, xf_crlb, yf_crlb, zf_crlb, N_crlb, off_crlb, llv_all]\n");
mexErrMsgTxt("See Error above!\n");
}
// allocate memory and copy it onto the gpu device
// iall
checkCudaErrors(cudaMalloc((void**)&d_iall, imem)); // allocate image memory
checkCudaErrors(cudaMalloc((void**)&d_zcurve, sizeof(float) * 10)); // allocate memory for the 10 defocus-curve constants
checkCudaErrors(cudaMemcpy(d_iall, iall, imem, cudaMemcpyHostToDevice)); // copy images from host to device
checkCudaErrors(cudaMemcpy(d_zcurve, zcurve, sizeof(float) * 10, cudaMemcpyHostToDevice));
// allocate memory for fitted variables that will be returned from device
checkCudaErrors(cudaMalloc((void**)&d_xf_all, vmem)); // allocate xf_all memory
checkCudaErrors(cudaMalloc((void**)&d_xf_crlb, vmem)); // allocate xf_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_yf_all, vmem)); // allocate yf_all memory
checkCudaErrors(cudaMalloc((void**)&d_yf_crlb, vmem)); // allocate yf_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_zf_all, vmem)); // allocate zf memory
checkCudaErrors(cudaMalloc((void**)&d_zf_crlb, vmem)); // allocate zf_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_N, vmem)); // allocate N memory
checkCudaErrors(cudaMalloc((void**)&d_N_crlb, vmem)); // allocate N_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_off, vmem)); // allocate off memory
checkCudaErrors(cudaMalloc((void**)&d_off_crlb, vmem)); // allocate off_crlb memory
checkCudaErrors(cudaMalloc((void**)&d_llv, vmem)); // allocate llv memory
/* Run GPU kernel*/
threadsperblock = mxGetScalar(prhs[2]); // get number of threads per block from matlab
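// launch enough blocks to give one thread per image: ceil(numi / threadsperblock)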
localize13 << <((numi - 1) / threadsperblock + 1), threadsperblock >> >(d_iall, d_xf_all, d_yf_all, d_zf_all, d_N, d_off, d_xf_crlb, d_yf_crlb, d_zf_crlb, d_N_crlb, d_off_crlb, d_llv, angle, d_zcurve, numi);
// Allocate host side memory for output arrays at the output pointer positions
plhs[0] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[1] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[2] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[3] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[4] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[5] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[6] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
plhs[7] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[8] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[9] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
plhs[10] = mxCreateNumericMatrix(numi, 1, mxSINGLE_CLASS, mxREAL);
// Copy pointers from mex array
xf = (float *)mxGetPr(plhs[0]);
xfc = (float *)mxGetPr(plhs[1]);
yf = (float *)mxGetPr(plhs[2]);
yfc = (float *)mxGetPr(plhs[3]);
zf = (float *)mxGetPr(plhs[4]);
zfc = (float *)mxGetPr(plhs[5]);
n = (float *)mxGetPr(plhs[6]);
nc = (float *)mxGetPr(plhs[7]);
off = (float *)mxGetPr(plhs[8]);
offc = (float *)mxGetPr(plhs[9]);
llv = (float *)mxGetPr(plhs[10]); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
// copy memory from device to host memory at mex array pointers
checkCudaErrors(cudaMemcpy(xf, d_xf_all, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(xfc, d_xf_crlb, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(yf, d_yf_all, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(yfc, d_yf_crlb, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(zf, d_zf_all, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(zfc, d_zf_crlb, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(n, d_N, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(nc, d_N_crlb, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(off, d_off, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(offc, d_off_crlb, vmem, cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(llv, d_llv, vmem, cudaMemcpyDeviceToHost)); // checked so far as of 2/7/18 for updates to fit zf from full_chain_loc
// clean up
cudaFree(d_iall);
cudaFree(d_N);
cudaFree(d_xf_all);
cudaFree(d_yf_all);
cudaFree(d_zf_all);
cudaFree(d_off);
cudaFree(d_xf_crlb);
cudaFree(d_yf_crlb);
cudaFree(d_zf_crlb);
cudaFree(d_N_crlb);
cudaFree(d_off_crlb);
cudaFree(d_llv);
cudaFree(d_zcurve);
} // DONE
|
bad373bc2fd06e48e0b78a0f1fa3eb3b847b7c1d.hip | // !!! This is a file automatically generated by hipify!!!
#include "common/book.h"
#include "common/cpu_bitmap.h"
#include <hip/hip_runtime.h>
#include <cuda_device_runtime_api.h>
#include <hip/hip_runtime.h>
#define DIM 1000
struct hipComplex {
float r;
float i;
__device__ hipComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) { return r * r + i * i; }
__device__ hipComplex operator*(hipComplex const &a) {
return hipComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__device__ hipComplex operator+(hipComplex const &a) {
return hipComplex(r + a.r, i + a.i);
}
};
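// returns 1 if the Julia iteration z <- z*z + c stays bounded (|z|^2 <= 1000) for 200 steps at this pixel, 0 otherwise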
__device__ int julia(int x, int y) {
float const scale = 1.5;
float const jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float const jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
hipComplex c(-0.8, 0.156);
hipComplex a(jx, jy);
for (int i = 0; i < 200; ++i) {
a = a * a + c;
if (a.magnitude2() > 1000) {
return 0;
}
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int julia_value = julia(x, y);
ptr[offset * 4 + 0] = 255 * julia_value;
ptr[offset * 4 + 1] = 255 * julia_value;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(hipMalloc((void **)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
hipLaunchKernelGGL(( kernel), dim3(grid), dim3(1), 0, 0, dev_bitmap);
HANDLE_ERROR(hipMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(),
hipMemcpyDeviceToHost));
bitmap.display_and_exit();
hipFree(dev_bitmap);
return 0;
}
| bad373bc2fd06e48e0b78a0f1fa3eb3b847b7c1d.cu | #include "common/book.h"
#include "common/cpu_bitmap.h"
#include <cuda.h>
#include <cuda_device_runtime_api.h>
#include <cuda_runtime.h>
#define DIM 1000
struct cuComplex {
float r;
float i;
__device__ cuComplex(float a, float b) : r(a), i(b) {}
__device__ float magnitude2(void) { return r * r + i * i; }
__device__ cuComplex operator*(cuComplex const &a) {
return cuComplex(r * a.r - i * a.i, i * a.r + r * a.i);
}
__device__ cuComplex operator+(cuComplex const &a) {
return cuComplex(r + a.r, i + a.i);
}
};
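// returns 1 if the Julia iteration z <- z*z + c stays bounded (|z|^2 <= 1000) for 200 steps at this pixel, 0 otherwise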
__device__ int julia(int x, int y) {
float const scale = 1.5;
float const jx = scale * (float)(DIM / 2 - x) / (DIM / 2);
float const jy = scale * (float)(DIM / 2 - y) / (DIM / 2);
cuComplex c(-0.8, 0.156);
cuComplex a(jx, jy);
for (int i = 0; i < 200; ++i) {
a = a * a + c;
if (a.magnitude2() > 1000) {
return 0;
}
}
return 1;
}
__global__ void kernel(unsigned char *ptr) {
int x = blockIdx.x;
int y = blockIdx.y;
int offset = x + y * gridDim.x;
int julia_value = julia(x, y);
ptr[offset * 4 + 0] = 255 * julia_value;
ptr[offset * 4 + 1] = 255 * julia_value;
ptr[offset * 4 + 2] = 0;
ptr[offset * 4 + 3] = 255;
}
int main(void) {
CPUBitmap bitmap(DIM, DIM);
unsigned char *dev_bitmap;
HANDLE_ERROR(cudaMalloc((void **)&dev_bitmap, bitmap.image_size()));
dim3 grid(DIM, DIM);
kernel<<<grid, 1>>>(dev_bitmap);
HANDLE_ERROR(cudaMemcpy(bitmap.get_ptr(), dev_bitmap, bitmap.image_size(),
cudaMemcpyDeviceToHost));
bitmap.display_and_exit();
cudaFree(dev_bitmap);
return 0;
}
|
92dc98563531a088197424ae71df281a13156518.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Discrete Cosine Transform in Column wise (DCT one)
* DCT_I_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DCTI_Column_Kernel(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
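// tiled matrix multiply: As is filled on the fly with DCT-I basis coefficients, Bs with a tile of the input A, so C = (DCT-I matrix) * A, i.e. the transform applied down each column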
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = cosf((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTColumnOne(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
hipError_t error;
int devID = 0;
// get number of SMs on this GPU
error = hipGetDevice(&devID);
hipDeviceProp_t deviceProp;
error = hipGetDeviceProperties(&deviceProp, devID);
if (error != hipSuccess)
{
printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(hipMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//hipMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(hipMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(hipMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, hipMemcpyHostToDevice));
//hipMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, hipMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(hipPeekAtLastError());
gpuErrchk(hipDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(hipMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, hipMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(hipFree(deviceA));
//hipFree(deviceB);
gpuErrchk(hipFree(deviceC));
return;
}
}
| 92dc98563531a088197424ae71df281a13156518.cu | /*
* Discrete Cosine Transform in Column wise (DCT one)
* DCT_I_Column
* This CUDA code can handle/work with any type of the input mxArrays,
* GPUarray or standard matlab CPU array as input {prhs[0] := mxGPUArray or CPU Array}
* gpuArray output, B=DCT_I_Column(A)=mexFunction(A).
* Developed at UCL, Institute of Neurology, 12 Queen Square, WC1N 3AR, London
* Wellcome Trust Centre for Neuroimaging
* Part of the project SPM(http://www.fil.ion.ucl.ac.uk/spm)
* Copyright 2018
* Kevin Bronik
*/
#include "matrix.h"
#include "DCT_I_Column.cuh"
#include "mex.h"
#include "gpu/mxGPUArray.h"
#include <cuda.h>
#include <cuda_runtime.h>
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// #define TILE_DIM 16
#define DEFAULT_DIM 32 // Tile dimension
#define DELTA(i, j) ((i==j)?1:0)
//const float PI_d = 3.141592653589793238462643383279502884f; //pi
template <unsigned int TILE_DIM > __global__ void DCTI_Column_Kernel(float *A, float *C,
int numARows, int numAColumns,
int numCRows, int numCColumns)
{
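// tiled matrix multiply: As is filled on the fly with DCT-I basis coefficients, Bs with a tile of the input A, so C = (DCT-I matrix) * A, i.e. the transform applied down each column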
float CValue = 0.0f;
const float PI_d = 3.141592653589793238462643383279502884f; //pi
int Row = blockIdx.y*TILE_DIM + threadIdx.y;
int Col = blockIdx.x*TILE_DIM + threadIdx.x;
__shared__ float As[TILE_DIM][TILE_DIM];
__shared__ float Bs[TILE_DIM][TILE_DIM];
for (int k = 0; k < (TILE_DIM + numARows - 1) / TILE_DIM; k++) {
if (k*TILE_DIM + threadIdx.x < numARows && Row < numARows) { As[threadIdx.y][threadIdx.x] = cosf((Row*PI_d*(threadIdx.x + (k*TILE_DIM)) / (numARows - 1)))*sqrt(1.0 / (1 + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, 1) + DELTA((threadIdx.x + (k*TILE_DIM)) + 1, numARows)))*sqrt(1.0 / (1 + DELTA(1, Row + 1) + DELTA(numARows, Row + 1)))*sqrt(2.0 / numARows); }
//As[threadIdx.y][threadIdx.x] = A[Row*ACols + k*TILE_DIM + threadIdx.x];
else { As[threadIdx.y][threadIdx.x] = 0.0; }
if (k*TILE_DIM + threadIdx.y < numARows && Col < numAColumns){ Bs[threadIdx.y][threadIdx.x] = A[(k*TILE_DIM + threadIdx.y)*numAColumns + Col]; }
else { Bs[threadIdx.y][threadIdx.x] = 0.0; }
__syncthreads();
for (int n = 0; n < TILE_DIM; ++n) { CValue += As[threadIdx.y][n] * Bs[n][threadIdx.x]; }
__syncthreads();
}
if (Row < numCRows && Col < numCColumns) { C[((blockIdx.y * blockDim.y + threadIdx.y)*numCColumns) + (blockIdx.x*blockDim.x) + threadIdx.x] = CValue; }
}
// Matrix multiplication - Host code
// Matrix dimensions are assumed to be multiples of BLOCK_SIZE
extern "C" void CalculateTransformDCTColumnOne(float * A, float * C, int numARows,
int numAColumns, int numCRows, int numCColumns)
{
float * hostA = A; // The A matrix
//float * hostB = B; // The B matrix
float * hostC = C; // The output C matrix
//float * hostComputedC;
float * deviceA;
//float * deviceB;
float * deviceC;
//hostA = (float *)malloc(sizeof(float)*numARows*numAColumns);
cudaError_t error;
int devID = 0;
// get number of SMs on this GPU
error = cudaGetDevice(&devID);
cudaDeviceProp deviceProp;
error = cudaGetDeviceProperties(&deviceProp, devID);
if (error != cudaSuccess)
{
printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__);
exit(EXIT_FAILURE);
}
int TILEDIM = (deviceProp.major < 2) ? 16 : 32;
// Setting numCRows and numCColumns
numCRows = numARows;
numCColumns = numAColumns;
//hostC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
//hostComputedC = (float *)malloc(sizeof(float)*numCRows*numCColumns);
// Allocating GPU memory
gpuErrchk(cudaMalloc((void **)&deviceA, sizeof(float)*numARows*numAColumns));
//cudaMalloc((void **)&deviceB, sizeof(float)*numBRows*numBColumns);
gpuErrchk(cudaMalloc((void **)&deviceC, sizeof(float)*numCRows*numCColumns));
//thrust::device_ptr< float >dev_ptr_A(deviceA);
//thrust::device_ptr< float >dev_ptr_C(deviceC);
// Copy memory to the GPU
gpuErrchk(cudaMemcpy(deviceA, hostA, sizeof(float)*numARows*numAColumns, cudaMemcpyHostToDevice));
//cudaMemcpy(deviceB, hostB, sizeof(float)*numBRows*numBColumns, cudaMemcpyHostToDevice);
/////////////////////////////////////////////////////////
unsigned int TILE_DIM=16;
dim3 dimBlock;
dim3 dimGrid;
switch (TILEDIM){
case 16:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Kernel <16> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
case 32:
TILE_DIM= TILEDIM;
dimBlock.x=TILE_DIM;
dimBlock.y=TILE_DIM;
dimBlock.z=1;
dimGrid.x = (numCColumns + dimBlock.x - 1) / dimBlock.x;
dimGrid.y = (numCRows + dimBlock.y - 1) / dimBlock.y;
DCTI_Column_Kernel <32> << <dimGrid, dimBlock >> >(deviceA, deviceC, numARows, numAColumns, numCRows, numCColumns);
//matrixMultiplyShared << <dimGrid, dimBlock >> >(thrust::raw_pointer_cast(&dev_ptr_A[0]), thrust::raw_pointer_cast(&dev_ptr_C[0]), numARows, numAColumns, numCRows, numCColumns);
gpuErrchk(cudaPeekAtLastError());
gpuErrchk(cudaDeviceSynchronize());
// Copy the results in GPU memory back to the CPU
gpuErrchk(cudaMemcpy(hostC, deviceC, sizeof(float)*numCRows*numCColumns, cudaMemcpyDeviceToHost));
C = hostC;
//thrust::device_free(dev_ptr_A);
//thrust::device_free(dev_ptr_C);
gpuErrchk(cudaFree(deviceA));
//cudaFree(deviceB);
gpuErrchk(cudaFree(deviceC));
return;
}
}
|
bd607f8f691edee31a43d9a3c9b80abcce72e9e9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if GOOGLE_CUDA
#include "smooth_L1_gpu.hpp"
__global__ void SmoothL1Forward( const float* diffs, float* outputs, float sigma2, int ndata )
{
CUDA_1D_KERNEL_LOOP( i, ndata )
{
float val = diffs[i];
float abs_val = abs(val);
if( abs_val < 1.0/sigma2 )
outputs[i] = 0.5*val*val*sigma2;
else
outputs[i] = abs_val - 0.5/sigma2;
}
}
__global__ void SmoothL1Backward( const float* diffs, const float* top_grad, float* bottom_grad, float sigma2, int ndata )
{
CUDA_1D_KERNEL_LOOP( i, ndata )
{
float val = diffs[i];
float abs_val = abs(val);
if( abs_val < 1.0/sigma2 )
{
bottom_grad[i] = val * sigma2;
}
else
{
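// outside the quadratic region the gradient of the smooth-L1 loss is sign(val): +1, -1, or 0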
bottom_grad[i] = (float(0) < val) - (val < float(0));
}
bottom_grad[i] *= top_grad[i];
}
}
void smooth_l1_forward_gpu( const float* diffs, float* output, float sigma2, int ndata )
{
int total_blocks = (ndata+kThreadsPerBlock-1)/kThreadsPerBlock;
hipLaunchKernelGGL(( SmoothL1Forward), dim3(total_blocks),dim3(kThreadsPerBlock), 0, 0, diffs, output, sigma2, ndata );
hipError_t err;
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
}
void smooth_l1_backward_gpu( const float* diffs, const float* top_grad, float* bottom_grad, float sigma2, int ndata )
{
int total_blocks = (ndata+kThreadsPerBlock-1)/kThreadsPerBlock;
hipLaunchKernelGGL(( SmoothL1Backward), dim3(total_blocks),dim3(kThreadsPerBlock), 0, 0, diffs, top_grad, bottom_grad, sigma2, ndata );
hipError_t err;
err = hipGetLastError();
if(hipSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", hipGetErrorString( err ) );
exit( -1 );
}
}
#endif
| bd607f8f691edee31a43d9a3c9b80abcce72e9e9.cu | #if GOOGLE_CUDA
#include "smooth_L1_gpu.hpp"
__global__ void SmoothL1Forward( const float* diffs, float* outputs, float sigma2, int ndata )
{
CUDA_1D_KERNEL_LOOP( i, ndata )
{
float val = diffs[i];
float abs_val = abs(val);
if( abs_val < 1.0/sigma2 )
outputs[i] = 0.5*val*val*sigma2;
else
outputs[i] = abs_val - 0.5/sigma2;
}
}
__global__ void SmoothL1Backward( const float* diffs, const float* top_grad, float* bottom_grad, float sigma2, int ndata )
{
CUDA_1D_KERNEL_LOOP( i, ndata )
{
float val = diffs[i];
float abs_val = abs(val);
if( abs_val < 1.0/sigma2 )
{
bottom_grad[i] = val * sigma2;
}
else
{
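// outside the quadratic region the gradient of the smooth-L1 loss is sign(val): +1, -1, or 0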
bottom_grad[i] = (float(0) < val) - (val < float(0));
}
bottom_grad[i] *= top_grad[i];
}
}
void smooth_l1_forward_gpu( const float* diffs, float* output, float sigma2, int ndata )
{
int total_blocks = (ndata+kThreadsPerBlock-1)/kThreadsPerBlock;
SmoothL1Forward<<<total_blocks,kThreadsPerBlock>>>( diffs, output, sigma2, ndata );
cudaError_t err;
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
}
void smooth_l1_backward_gpu( const float* diffs, const float* top_grad, float* bottom_grad, float sigma2, int ndata )
{
int total_blocks = (ndata+kThreadsPerBlock-1)/kThreadsPerBlock;
SmoothL1Backward<<<total_blocks,kThreadsPerBlock>>>( diffs, top_grad, bottom_grad, sigma2, ndata );
cudaError_t err;
err = cudaGetLastError();
if(cudaSuccess != err)
{
fprintf( stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString( err ) );
exit( -1 );
}
}
#endif
|
8675f59c30dd0cf501a5c86c34f971e105c52051.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <limits.h>
#include <stddef.h>
__global__ void fun(int *y){
if (sizeof(ptrdiff_t) == sizeof(int)) {
unsigned char *ptr0 = malloc(((unsigned)INT_MAX) + 1);
unsigned char *ptr1 = ptr0 + (unsigned)INT_MAX + 1;
ptr1 - ptr0;
*y = ptr1 - ptr0;
printf("%ld\n", *y);
}
//return 0;
}
int main(void)
{
int y;
int *dev_y;
hipMalloc((void**)&dev_y, sizeof(int));
hipLaunchKernelGGL(( fun), dim3(1),dim3(1), 0, 0, dev_y);
hipMemcpy(&y, dev_y, sizeof(int), hipMemcpyDeviceToHost);
hipFree(dev_y);
return 0;
}
//j050a.cu(12): error: a value of type "void *" cannot be used to initialize an entity of type "unsigned char *"
| 8675f59c30dd0cf501a5c86c34f971e105c52051.cu | #include <stdlib.h>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <math.h>
#include <limits.h>
#include <stddef.h>
__global__ void fun(int *y){
if (sizeof(ptrdiff_t) == sizeof(int)) {
unsigned char *ptr0 = malloc(((unsigned)INT_MAX) + 1);
unsigned char *ptr1 = ptr0 + (unsigned)INT_MAX + 1;
ptr1 - ptr0;
*y = ptr1 - ptr0;
printf("%ld\n", *y);
}
//return 0;
}
int main(void)
{
int y;
int *dev_y;
cudaMalloc((void**)&dev_y, sizeof(int));
fun<<<1,1>>>(dev_y);
cudaMemcpy(&y, dev_y, sizeof(int), cudaMemcpyDeviceToHost);
cudaFree(dev_y);
return 0;
}
//j050a.cu(12): error: a value of type "void *" cannot be used to initialize an entity of type "unsigned char *"
|
93a3f0005f0c2e341014468b5c1103cc34d0f89d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "src/utils.hpp"
#define min(a, b) a < b ? a : b
__device__ void cross(
const float* __restrict__ a,
const float* __restrict__ b,
float* __restrict__ c
) {
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
}
__device__ void sub(
const float* __restrict__ a,
const float* __restrict__ b,
float* __restrict__ c
) {
c[0] = a[0] - b[0];
c[1] = a[1] - b[1];
c[2] = a[2] - b[2];
}
__device__ float dot(const float* __restrict__ a, const float* __restrict__ b) {
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
__device__ float signed_volume(
const float* __restrict__ a,
const float* __restrict__ b,
const float* __restrict__ c,
const float* __restrict__ d
) {
float diff_b_a[3];
float diff_c_a[3];
float diff_d_a[3];
float cross_diff_b_a_diff_c_a[3];
sub(b, a, diff_b_a);
sub(c, a, diff_c_a);
sub(d, a, diff_d_a);
cross(diff_b_a, diff_c_a, cross_diff_b_a_diff_c_a);
return 1.0 / 6.0 * dot(cross_diff_b_a_diff_c_a, diff_d_a);
}
__device__ bool same_sign(float value_1, float value_2) {
return (int)(value_1 < 0) == (int)(value_2 < 0);
}
__device__ bool same_sign3(float value_1, float value_2, float value_3) {
return (value_1 < 0) == (value_2 < 0) && (value_1 < 0) == (value_3 < 0) && (value_2 < 0) == (value_3 < 0);
}
__device__ float signed_area(
const float * __restrict__ x1,
const float * __restrict__ x2,
const float * __restrict__ a,
const float * __restrict__ w
) {
float diff_x1_a[3];
sub(x1, a, diff_x1_a);
float diff_x2_a[3];
sub(x2, a, diff_x2_a);
float cross_diffs[3];
cross(diff_x1_a, diff_x2_a, cross_diffs);
return 0.5 * dot(cross_diffs, w);
}
__global__ void watertightness_kernel(
const float* __restrict__ ray_origins,
const float* __restrict__ ray_directions,
const float* __restrict__ triangles,
float* __restrict__ passed_test,
int n_rays,
int n_triangles
) {
for (
int i = blockDim.x * blockIdx.x + threadIdx.x;
i < n_rays;
i += gridDim.x
) {
int num_intersections = 0;
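// count the triangles whose interior is crossed by this ray's supporting line (all three signed areas share a sign)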
for (int triangle_i = 0 ; triangle_i < n_triangles; ++triangle_i) {
const float *current_triangle = &triangles[triangle_i * 3 * 3];
const float *p1 = ¤t_triangle[0];
const float *p2 = ¤t_triangle[3];
const float *p3 = ¤t_triangle[6];
float a1 = signed_area(p1, p2, &ray_origins[i * 3], &ray_directions[i * 3]);
float a2 = signed_area(p2, p3, &ray_origins[i * 3], &ray_directions[i * 3]);
float a3 = signed_area(p3, p1, &ray_origins[i * 3], &ray_directions[i * 3]);
if (same_sign3(a1, a2, a3)) {
num_intersections++;
}
}
passed_test[i] = (float)(num_intersections % 2 == 0);
}
}
void watertightness(
const float* ray_origins,
const float* ray_directions,
const float* triangles,
float *passed_test,
int n_rays,
int n_triangles,
hipStream_t stream
) {
hipLaunchKernelGGL(( watertightness_kernel), dim3(65536), dim3(128), 0, stream,
ray_origins,
ray_directions,
triangles,
passed_test,
n_rays,
n_triangles
);
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
throw std::runtime_error(
Formatter() << "CUDA kernel failed : " << std::to_string(err)
);
}
}
| 93a3f0005f0c2e341014468b5c1103cc34d0f89d.cu | #include "src/utils.hpp"
#define min(a, b) a < b ? a : b
__device__ void cross(
const float* __restrict__ a,
const float* __restrict__ b,
float* __restrict__ c
) {
c[0] = a[1] * b[2] - a[2] * b[1];
c[1] = a[2] * b[0] - a[0] * b[2];
c[2] = a[0] * b[1] - a[1] * b[0];
}
__device__ void sub(
const float* __restrict__ a,
const float* __restrict__ b,
float* __restrict__ c
) {
c[0] = a[0] - b[0];
c[1] = a[1] - b[1];
c[2] = a[2] - b[2];
}
__device__ float dot(const float* __restrict__ a, const float* __restrict__ b) {
return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}
__device__ float signed_volume(
const float* __restrict__ a,
const float* __restrict__ b,
const float* __restrict__ c,
const float* __restrict__ d
) {
float diff_b_a[3];
float diff_c_a[3];
float diff_d_a[3];
float cross_diff_b_a_diff_c_a[3];
sub(b, a, diff_b_a);
sub(c, a, diff_c_a);
sub(d, a, diff_d_a);
cross(diff_b_a, diff_c_a, cross_diff_b_a_diff_c_a);
return 1.0 / 6.0 * dot(cross_diff_b_a_diff_c_a, diff_d_a);
}
__device__ bool same_sign(float value_1, float value_2) {
return (int)(value_1 < 0) == (int)(value_2 < 0);
}
__device__ bool same_sign3(float value_1, float value_2, float value_3) {
return (value_1 < 0) == (value_2 < 0) && (value_1 < 0) == (value_3 < 0) && (value_2 < 0) == (value_3 < 0);
}
__device__ float signed_area(
const float * __restrict__ x1,
const float * __restrict__ x2,
const float * __restrict__ a,
const float * __restrict__ w
) {
float diff_x1_a[3];
sub(x1, a, diff_x1_a);
float diff_x2_a[3];
sub(x2, a, diff_x2_a);
float cross_diffs[3];
cross(diff_x1_a, diff_x2_a, cross_diffs);
return 0.5 * dot(cross_diffs, w);
}
__global__ void watertightness_kernel(
const float* __restrict__ ray_origins,
const float* __restrict__ ray_directions,
const float* __restrict__ triangles,
float* __restrict__ passed_test,
int n_rays,
int n_triangles
) {
for (
int i = blockDim.x * blockIdx.x + threadIdx.x;
i < n_rays;
i += gridDim.x
) {
int num_intersections = 0;
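// count the triangles whose interior is crossed by this ray's supporting line (all three signed areas share a sign)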
for (int triangle_i = 0 ; triangle_i < n_triangles; ++triangle_i) {
const float *current_triangle = &triangles[triangle_i * 3 * 3];
const float *p1 = ¤t_triangle[0];
const float *p2 = ¤t_triangle[3];
const float *p3 = ¤t_triangle[6];
float a1 = signed_area(p1, p2, &ray_origins[i * 3], &ray_directions[i * 3]);
float a2 = signed_area(p2, p3, &ray_origins[i * 3], &ray_directions[i * 3]);
float a3 = signed_area(p3, p1, &ray_origins[i * 3], &ray_directions[i * 3]);
if (same_sign3(a1, a2, a3)) {
num_intersections++;
}
}
passed_test[i] = (float)(num_intersections % 2 == 0);
}
}
void watertightness(
const float* ray_origins,
const float* ray_directions,
const float* triangles,
float *passed_test,
int n_rays,
int n_triangles,
cudaStream_t stream
) {
watertightness_kernel<<<65536, 128, 0, stream>>>(
ray_origins,
ray_directions,
triangles,
passed_test,
n_rays,
n_triangles
);
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
throw std::runtime_error(
Formatter() << "CUDA kernel failed : " << std::to_string(err)
);
}
}
|
64cd2c56bfb7b841bfbfc7ae4215b87e63145f79.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _FX_
#define _FX_
extern "C"
{
// Set of functions per row in Gw product
__device__ float f_row1(float *Gw, int j){
return Gw[j];
}
__device__ float f_row2(float *Gw, int j){
return 0.5*(Gw[j] + Gw[6+j] + Gw[3+j]);
}
__device__ float f_row3(float *Gw, int j){
return 0.5*(Gw[j] + Gw[6+j] - Gw[3+j]);
}
__device__ float f_row4(float *Gw, int j){
return Gw[6+j];
}
// Set of functions per column in GwGt product
__device__ float f_col1(float *Gw, int j){
return Gw[j];
}
__device__ float f_col2(float *Gw, int j){
return 0.5*(Gw[j] + Gw[j+2] + Gw[j+1]);
}
__device__ float f_col3(float *Gw, int j){
return 0.5*(Gw[j] + Gw[j+2] - Gw[j+1]);
}
__device__ float f_col4(float *Gw, int j){
return Gw[j+2];
}
typedef float(*pointFunction_t)(float *, int);
__global__ void FX(float *pInputs, float *pOutputs, int filt_k,
int filt_c, int filt_h, int filt_w, int alpha){
int Inx = threadIdx.x, Iny = threadIdx.y;
int TileX = blockIdx.x, TileY = blockIdx.y;
int c_glb_offset = filt_k*filt_h*filt_w;
int c_kernel = TileY*BC*c_glb_offset + TileX*BK + Iny*c_glb_offset + Inx;
int c_glb_offset_s = filt_k*4*4;
int c_kernel_s = TileY*BC*c_glb_offset_s + TileX*BK + Iny*c_glb_offset_s + Inx;
float Gw[21]; //9+12. In registers
float *Gw_buffer = Gw+9;
pointFunction_t func1[4] = {f_row1, f_row2, f_row3, f_row4};
pointFunction_t func2[4] = {f_col1, f_col2, f_col3, f_col4};
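// for each filter slice handled by this thread: load the 3x3 tile, form G*w row by row into Gw_buffer, then (G*w)*G^T column by column and store the 4x4 result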
for(int bk=0; bk<BK; bk+=blockDim.x){
for(int i=0; i<9; i++){
Gw[i] = pInputs[c_kernel + i*filt_k];
}
int aux;
for(int i=0; i<4; i++){
aux = i*3;
for(int j=0; j<3; j++){
Gw_buffer[j+aux] = (*func1[i])(Gw, j);
}
}
int aux2;
for(int i=0; i<4; i++){
aux = i*3; aux2 = i<<2;
for(int j=0; j<4; j++){
pOutputs[c_kernel_s+aux2*filt_k+j*filt_k] = (*func2[j])(Gw_buffer, aux);
}
}
c_kernel += blockDim.x;
c_kernel_s += blockDim.x;
}
}
}
#endif | 64cd2c56bfb7b841bfbfc7ae4215b87e63145f79.cu |
// Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef _FX_
#define _FX_
extern "C"
{
// Set of functions per row in Gw product
__device__ float f_row1(float *Gw, int j){
return Gw[j];
}
__device__ float f_row2(float *Gw, int j){
return 0.5*(Gw[j] + Gw[6+j] + Gw[3+j]);
}
__device__ float f_row3(float *Gw, int j){
return 0.5*(Gw[j] + Gw[6+j] - Gw[3+j]);
}
__device__ float f_row4(float *Gw, int j){
return Gw[6+j];
}
// Set of functions per column in GwGt product
__device__ float f_col1(float *Gw, int j){
return Gw[j];
}
__device__ float f_col2(float *Gw, int j){
return 0.5*(Gw[j] + Gw[j+2] + Gw[j+1]);
}
__device__ float f_col3(float *Gw, int j){
return 0.5*(Gw[j] + Gw[j+2] - Gw[j+1]);
}
__device__ float f_col4(float *Gw, int j){
return Gw[j+2];
}
typedef float(*pointFunction_t)(float *, int);
__global__ void FX(float *pInputs, float *pOutputs, int filt_k,
int filt_c, int filt_h, int filt_w, int alpha){
int Inx = threadIdx.x, Iny = threadIdx.y;
int TileX = blockIdx.x, TileY = blockIdx.y;
int c_glb_offset = filt_k*filt_h*filt_w;
int c_kernel = TileY*BC*c_glb_offset + TileX*BK + Iny*c_glb_offset + Inx;
int c_glb_offset_s = filt_k*4*4;
int c_kernel_s = TileY*BC*c_glb_offset_s + TileX*BK + Iny*c_glb_offset_s + Inx;
float Gw[21]; //9+12. In registers
float *Gw_buffer = Gw+9;
pointFunction_t func1[4] = {f_row1, f_row2, f_row3, f_row4};
pointFunction_t func2[4] = {f_col1, f_col2, f_col3, f_col4};
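// for each filter slice handled by this thread: load the 3x3 tile, form G*w row by row into Gw_buffer, then (G*w)*G^T column by column and store the 4x4 result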
for(int bk=0; bk<BK; bk+=blockDim.x){
for(int i=0; i<9; i++){
Gw[i] = pInputs[c_kernel + i*filt_k];
}
int aux;
for(int i=0; i<4; i++){
aux = i*3;
for(int j=0; j<3; j++){
Gw_buffer[j+aux] = (*func1[i])(Gw, j);
}
}
int aux2;
for(int i=0; i<4; i++){
aux = i*3; aux2 = i<<2;
for(int j=0; j<4; j++){
pOutputs[c_kernel_s+aux2*filt_k+j*filt_k] = (*func2[j])(Gw_buffer, aux);
}
}
c_kernel += blockDim.x;
c_kernel_s += blockDim.x;
}
}
}
#endif |
00e6c6182020659e563b9741b0e3b7e88b13a134.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <vector>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <cuda_device_runtime_api.h>
#include <cudalibxt.h>
#include <device_launch_parameters.h>
#include <hip/device_functions.h>
#include <sys/time.h>
#define num_loops 10000
#define num_data 256
#define data_max_value 10000
#define BLOCK_SIZE 1024
__global__ void kernel_kmeans(int *data, int *centroids, int numdata)
{
int row = blockIdx.x * BLOCK_SIZE + threadIdx.x; // one thread per data point; the launch uses a 1-D block, so index with threadIdx.x
// int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if (row >= numdata) return; // guard threads in the partial last block
float d_c0, d_c1, d_c2;
float2 pt;
float2 ctr0,ctr1,ctr2;
pt.x = data[(row * 3) + 0];
pt.y = data[(row * 3) + 1];
ctr0.x = centroids[(0 * 2) + 0];
ctr0.y = centroids[(0 * 2) + 1];
ctr1.x = centroids[(1 * 2) + 0];
ctr1.y = centroids[(1 * 2) + 1];
ctr2.x = centroids[(2 * 2) + 0];
ctr2.y = centroids[(2 * 2) + 1];
d_c0 = hypot(pt.x-ctr0.x,pt.y-ctr0.y);
d_c1 = hypot(pt.x-ctr1.x,pt.y-ctr1.y);
d_c2 = hypot(pt.x-ctr2.x,pt.y-ctr2.y);
if ((int)d_c0 < (int)d_c1 && (int)d_c0 < (int)d_c2)
data[(3 * row) + 2] = 0;
else if ((int)d_c1 < (int)d_c0 && (int)d_c1 < (int)d_c2)
data[(3 * row) + 2] = 1;
else if ((int)d_c2 < (int)d_c0 && (int)d_c2 < (int)d_c1)
data[(3 * row) + 2] = 2;
}
int main()
{
struct timeval start;
struct timeval end;
double elapsedTime;
//int h_data[num_data][3];
//int h_centroids[3][2];
srand(time(NULL));
int *h_data;
int *h_centroids;
//srand(time(NULL));
h_data = (int*)malloc(num_data * 3 * sizeof(int));
h_centroids = (int*)malloc(3 * 2 * sizeof(int));
for (int i = 0; i < num_data; i++)
{
for (int j = 0; j < 2; j++)
{
h_data[ i* 3 + j] = (rand() % data_max_value) + 1;
//h_data[ i][j] = 1;//(rand() % data_max_value) + 1;
}
}
for (int i = 0; i < 3; i++)
{
int index = rand() % num_data;
h_centroids[i*2+0] = h_data[index*3+0];
h_centroids[i*2+1] = h_data[index*3+1];
//cout << "centroid:" << i << "is" << h_centroids[i*2+0] << "," << centroids[i*2+1] << endl;
//printf("centroid %d is %d,%d\n",i,h_centroids[i*2+0],h_centroids[i*2+1]);
//printf("\n");
}
int *d_data;
int *d_centroids;
hipMalloc(&d_data , num_data*3*sizeof(int));
hipMalloc(&d_centroids , 3*2*sizeof(int));
hipMemcpy(d_data, h_data, num_data*3*sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(d_centroids, h_centroids, 3*2*sizeof(int), hipMemcpyHostToDevice);
//dim3 threads(BLOCK_SIZE);
//dim3 grid(num_data/threads.x);
gettimeofday(&start, NULL);
for (int loop = 0; loop < num_loops;loop++)
{
hipMemcpy(d_centroids, h_centroids, 3*2*sizeof(int), hipMemcpyHostToDevice);
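// assignment step: label each data point with the index of its nearest centroid on the GPU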
hipLaunchKernelGGL(( kernel_kmeans), dim3((num_data + BLOCK_SIZE - 1)/BLOCK_SIZE),dim3(BLOCK_SIZE), 0, 0, d_data,d_centroids,num_data);
hipMemcpy(h_data, d_data, num_data*3*sizeof(int), hipMemcpyDeviceToHost);
float c0x_avg = 0, c0y_avg = 0, c1x_avg = 0, c1y_avg = 0, c2x_avg = 0, c2y_avg = 0;
int c0_count = 0, c1_count = 0, c2_count = 0;
// moving the centroid step
for (int i = 0; i < num_data; i++)
{
if (h_data[i*3+2] == 0)
{
c0x_avg = c0x_avg + h_data[i*3+0];
c0y_avg = c0y_avg + h_data[i*3+1];
c0_count++;
}
else if (h_data[i*3+2] == 1)
{
c1x_avg = c1x_avg + h_data[i*3+0];
c1y_avg = c1y_avg + h_data[i*3+1];
c1_count++;
}
else if (h_data[i*3+2] == 2)
{
c2x_avg = c2x_avg + h_data[i*3+0];
c2y_avg = c2y_avg + h_data[i*3+1];
c2_count++;
}
else {
// No minimum was found, maybe equal ?
}
}
if(c0_count == 0){
c0x_avg = h_centroids[0*2+0];
c0y_avg = h_centroids[0*2+1];
c0_count = 1;
}
else if(c1_count == 0){
c1x_avg = h_centroids[1*2+0];
c1y_avg = h_centroids[1*2+1];
c1_count = 1;
}
else if(c2_count == 0){
c2x_avg = h_centroids[2*2+0];
c2y_avg = h_centroids[2*2+1];
c2_count = 1;
}
h_centroids[0*2+0] = c0x_avg / c0_count;
h_centroids[0*2+1] = c0y_avg / c0_count;
h_centroids[1*2+0] = c1x_avg / c1_count;
h_centroids[1*2+1] = c1y_avg / c1_count;
h_centroids[2*2+0] = c2x_avg / c2_count;
h_centroids[2*2+1] = c2y_avg / c2_count;
}
hipFree(d_data);
hipFree(d_centroids);
/*
cout << "Centroid 1 : (" << h_centroids[0*2+0] << " , " << h_centroids[0*2+1] << ")" << endl;
cout << "Centroid 2 : (" << h_centroids[1*2+0] << " , " << h_centroids[1*2+1] << ")" << endl;
cout << "Centroid 3 : (" << h_centroids[2*2+0] << " , " << h_centroids[2*2+1] << ")" << endl;
*/
/*printf("Centroid 1 : ( %d),(%d)\n",h_centroids[0*2+0],h_centroids[0*2+1]);
printf("Centroid 2 : ( %d),(%d)\n",h_centroids[1*2+0],h_centroids[1*2+1]);
printf("Centroid 3 : ( %d),(%d)\n",h_centroids[2*2+0],h_centroids[2*2+1]);*/
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
printf("CUDA : %.3f seconds\n",elapsedTime);
return 0;
}
| 00e6c6182020659e563b9741b0e3b7e88b13a134.cu | #include <stdio.h>
#include <cuda.h>
#include <vector>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cuda_device_runtime_api.h>
#include <cudalibxt.h>
#include <device_launch_parameters.h>
#include <device_functions.h>
#include <sys/time.h>
#define num_loops 10000
#define num_data 256
#define data_max_value 10000
#define BLOCK_SIZE 1024
__global__ void kernel_kmeans(int *data, int *centroids, int numdata)
{
int row = blockIdx.x * BLOCK_SIZE + threadIdx.x; // one thread per data point; the launch uses a 1-D block, so index with threadIdx.x
// int col = blockIdx.x * BLOCK_SIZE + threadIdx.x;
if (row >= numdata) return; // guard threads in the partial last block
float d_c0, d_c1, d_c2;
float2 pt;
float2 ctr0,ctr1,ctr2;
pt.x = data[(row * 3) + 0];
pt.y = data[(row * 3) + 1];
ctr0.x = centroids[(0 * 2) + 0];
ctr0.y = centroids[(0 * 2) + 1];
ctr1.x = centroids[(1 * 2) + 0];
ctr1.y = centroids[(1 * 2) + 1];
ctr2.x = centroids[(2 * 2) + 0];
ctr2.y = centroids[(2 * 2) + 1];
d_c0 = hypot(pt.x-ctr0.x,pt.y-ctr0.y);
d_c1 = hypot(pt.x-ctr1.x,pt.y-ctr1.y);
d_c2 = hypot(pt.x-ctr2.x,pt.y-ctr2.y);
if ((int)d_c0 < (int)d_c1 && (int)d_c0 < (int)d_c2)
data[(3 * row) + 2] = 0;
else if ((int)d_c1 < (int)d_c0 && (int)d_c1 < (int)d_c2)
data[(3 * row) + 2] = 1;
else if ((int)d_c2 < (int)d_c0 && (int)d_c2 < (int)d_c1)
data[(3 * row) + 2] = 2;
}
int main()
{
struct timeval start;
struct timeval end;
double elapsedTime;
//int h_data[num_data][3];
//int h_centroids[3][2];
srand(time(NULL));
int *h_data;
int *h_centroids;
//srand(time(NULL));
h_data = (int*)malloc(num_data * 3 * sizeof(int));
h_centroids = (int*)malloc(3 * 2 * sizeof(int));
for (int i = 0; i < num_data; i++)
{
for (int j = 0; j < 2; j++)
{
h_data[ i* 3 + j] = (rand() % data_max_value) + 1;
//h_data[ i][j] = 1;//(rand() % data_max_value) + 1;
}
}
for (int i = 0; i < 3; i++)
{
int index = rand() % num_data;
h_centroids[i*2+0] = h_data[index*3+0];
h_centroids[i*2+1] = h_data[index*3+1];
//cout << "centroid:" << i << "is" << h_centroids[i*2+0] << "," << centroids[i*2+1] << endl;
//printf("centroid %d is %d,%d\n",i,h_centroids[i*2+0],h_centroids[i*2+1]);
//printf("\n");
}
int *d_data;
int *d_centroids;
cudaMalloc(&d_data , num_data*3*sizeof(int));
cudaMalloc(&d_centroids , 3*2*sizeof(int));
cudaMemcpy(d_data, h_data, num_data*3*sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(d_centroids, h_centroids, 3*2*sizeof(int), cudaMemcpyHostToDevice);
//dim3 threads(BLOCK_SIZE);
//dim3 grid(num_data/threads.x);
gettimeofday(&start, NULL);
for (int loop = 0; loop < num_loops;loop++)
{
cudaMemcpy(d_centroids, h_centroids, 3*2*sizeof(int), cudaMemcpyHostToDevice);
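// assignment step: label each data point with the index of its nearest centroid on the GPU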
kernel_kmeans<<<(num_data + BLOCK_SIZE - 1)/BLOCK_SIZE,BLOCK_SIZE>>>(d_data,d_centroids,num_data);
cudaMemcpy(h_data, d_data, num_data*3*sizeof(int), cudaMemcpyDeviceToHost);
float c0x_avg = 0, c0y_avg = 0, c1x_avg = 0, c1y_avg = 0, c2x_avg = 0, c2y_avg = 0;
int c0_count = 0, c1_count = 0, c2_count = 0;
// moving the centroid step
for (int i = 0; i < num_data; i++)
{
if (h_data[i*3+2] == 0)
{
c0x_avg = c0x_avg + h_data[i*3+0];
c0y_avg = c0y_avg + h_data[i*3+1];
c0_count++;
}
else if (h_data[i*3+2] == 1)
{
c1x_avg = c1x_avg + h_data[i*3+0];
c1y_avg = c1y_avg + h_data[i*3+1];
c1_count++;
}
else if (h_data[i*3+2] == 2)
{
c2x_avg = c2x_avg + h_data[i*3+0];
c2y_avg = c2y_avg + h_data[i*3+1];
c2_count++;
}
else {
// No minimum was found, maybe equal ?
}
}
if(c0_count == 0){
c0x_avg = h_centroids[0*2+0];
c0y_avg = h_centroids[0*2+1];
c0_count = 1;
}
else if(c1_count == 0){
c1x_avg = h_centroids[1*2+0];
c1y_avg = h_centroids[1*2+1];
c1_count = 1;
}
else if(c2_count == 0){
c2x_avg = h_centroids[2*2+0];
c2y_avg = h_centroids[2*2+1];
c2_count = 1;
}
h_centroids[0*2+0] = c0x_avg / c0_count;
h_centroids[0*2+1] = c0y_avg / c0_count;
h_centroids[1*2+0] = c1x_avg / c1_count;
h_centroids[1*2+1] = c1y_avg / c1_count;
h_centroids[2*2+0] = c2x_avg / c2_count;
h_centroids[2*2+1] = c2y_avg / c2_count;
}
cudaFree(d_data);
cudaFree(d_centroids);
/*
cout << "Centroid 1 : (" << h_centroids[0*2+0] << " , " << h_centroids[0*2+1] << ")" << endl;
cout << "Centroid 2 : (" << h_centroids[1*2+0] << " , " << h_centroids[1*2+1] << ")" << endl;
cout << "Centroid 3 : (" << h_centroids[2*2+0] << " , " << h_centroids[2*2+1] << ")" << endl;
*/
/*printf("Centroid 1 : ( %d),(%d)\n",h_centroids[0*2+0],h_centroids[0*2+1]);
printf("Centroid 2 : ( %d),(%d)\n",h_centroids[1*2+0],h_centroids[1*2+1]);
printf("Centroid 3 : ( %d),(%d)\n",h_centroids[2*2+0],h_centroids[2*2+1]);*/
gettimeofday(&end, NULL);
// Calculate the time it took to do the above task
elapsedTime = (end.tv_sec - start.tv_sec) * 1000.0;
elapsedTime += (end.tv_usec - start.tv_usec) / 1000.0;
elapsedTime /= 1000;
printf("CUDA : %.3f seconds\n",elapsedTime);
return 0;
}
|