hip_filename (stringlengths 5–84) | hip_content (stringlengths 79–9.69M) | cuda_filename (stringlengths 4–83) | cuda_content (stringlengths 19–9.69M)
---|---|---|---|
cd978c773322186fda764a347120b763806b0199.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file csv-reader.cu code to read csv data
*
* CSV Reader
*/
#include <hip/hip_runtime.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <unordered_map>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "datetime_parser.cuh"
#include "cudf.h"
#include "utilities/error_utils.h"
#include "rmm/rmm.h"
#include "NVStrings.h"
constexpr int32_t HASH_SEED = 33;
using namespace std;
//-- define the structure for raw data handling - for internal use
typedef struct raw_csv_ {
char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
unsigned long long* d_num_records; // on-device: Number of records.
unsigned long long* recStart; // on-device: Starting position of the records.
char delimiter; // host: the delimiter
char terminator; // host: the line terminator
char quotechar; // host: the quote character
bool keepquotes; // host: indicates to keep the start and end quotechar
bool doublequote; // host: indicates to interpret two consecutive quotechar as a single quotechar
long num_bytes; // host: the number of bytes in the data
long num_bits; // host: the number of 64-bit bitmaps (different than valid)
unsigned long long num_records; // host: number of records (per column)
// int num_cols; // host: number of columns
int num_active_cols; // host: number of columns that will be returned to the user.
int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header
vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
vector<string> col_names; // host: array of column names
bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
long header_row; // Row id of the header
bool dayfirst;
} raw_csv_t;
typedef struct column_data_ {
unsigned long long countFloat;
unsigned long long countDateAndTime;
unsigned long long countString;
unsigned long long countInt8;
unsigned long long countInt16;
unsigned long long countInt32;
unsigned long long countInt64;
unsigned long long countNULL;
} column_data_t;
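// These per-column tallies are filled in by the dataTypeDetection kernel and read back by
// read_csv() to auto-detect a gdf_dtype for each column when the caller supplies no column names/dtypes.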
typedef struct parsing_opts_ {
char delimiter;
char terminator;
char quotechar;
bool keepquotes;
} parsing_opts_t;
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * csvData );
gdf_error allocateGdfDataSpace(gdf_column *);
gdf_dtype convertStringToDtype(std::string &dtype);
#define checkError(error, txt) if ( error != GDF_SUCCESS) { cerr << "ERROR: " << error << " in " << txt << endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x);
gdf_error launch_countRecords(raw_csv_t * csvData);
gdf_error launch_storeRecordStart(raw_csv_t * csvData);
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, long row_offset, unsigned long long *);
gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, long row_offset, column_data_t* d_columnData);
__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records);
__global__ void storeRecordStart(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) ;
__global__ void convertCsvToGdf(char *csv, const parsing_opts_t opts, unsigned long long num_records, int num_columns,bool *parseCol,unsigned long long *recStart,gdf_dtype *dtype,void **gdf_data,gdf_valid_type **valid,string_pair **str_cols,unsigned long long row_offset, long header_row,bool dayfirst,unsigned long long *num_valid);
__global__ void dataTypeDetection(char *raw_csv, const parsing_opts_t opts, unsigned long long num_records, int num_columns, bool *parseCol, unsigned long long *recStart, unsigned long long row_offset, long header_row, column_data_t* d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
__device__ int whichBitmap(int record) { return (record/8); }
__device__ int whichBit(int bit) { return (bit % 8); }
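// gdf_valid_type is a single byte, but atomicOr() works on 32-bit words: validAtomicOR() rounds the
// address down to its 4-byte aligned base and shifts the byte-sized mask into the matching lane of that word.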
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
atomicOr(base_address, int_val);
}
__device__ void setBit(gdf_valid_type* address, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
validAtomicOR(address, bitMask[bit]);
}
std::string stringType(gdf_dtype dt){
switch (dt){
case GDF_STRING: return std::string("str");
case GDF_DATE64: return std::string("date64");
case GDF_CATEGORY: return std::string("category");
case GDF_FLOAT64: return std::string("float64");
case GDF_INT8: return std::string("int8");
case GDF_INT16: return std::string("int16");
case GDF_INT32: return std::string("int32");
case GDF_INT64: return std::string("int64");
default:
return "long";
}
}
/**
* @brief read in a CSV file
*
* Read in a CSV file, extract all fields, and return a GDF (array of gdf_columns)
*
* @param[in,out] args the input arguments; this also contains the returned data
*
* Arguments:
*
* Required Arguments
* file_path - file location to read from - currently the file cannot be compressed
* num_cols - number of columns in the names and dtype arrays
* names - ordered List of column names, this is a required field
* dtype - ordered List of data types, this is required
*
* Optional
* lineterminator - define the line terminator character. Default is '\n'
* delimiter - define the field separator, default is ','. This argument is also called 'sep'
*
* quotechar; define the character used to denote start and end of a quoted item
* quoting; treat string fields as quoted item and remove the first and last quotechar
* nodoublequote; do not interpret two consecutive quotechar as a single quotechar
*
* delim_whitespace - use white space as the delimiter - default is false. This overrides the delimiter argument
* skipinitialspace - skip white spaces after the delimiter - default is false
*
* skiprows - number of rows at the start of the files to skip, default is 0
* skipfooter - number of rows at the bottom of the file to skip - default is 0
*
* dayfirst - is the first value the day? DD/MM versus MM/DD
*
*
* Output
* num_cols_out - Out: return the number of columns read in
* num_rows_out - Out: return the number of rows read in
* gdf_column **data - Out: return the array of *gdf_columns
*
*
* @return gdf_error
*
*/
gdf_error read_csv(csv_read_arg *args)
{
gdf_error error = gdf_error::GDF_SUCCESS;
//-----------------------------------------------------------------------------
// create the CSV data structure - this will be filled in as the CSV data is processed.
// Done first to validate data types
raw_csv_t * raw_csv = new raw_csv_t;
// error = parseArguments(args, raw_csv);
raw_csv->num_actual_cols = args->num_cols;
raw_csv->num_active_cols = args->num_cols;
raw_csv->num_records = 0;
if(args->delim_whitespace == true) {
raw_csv->delimiter = ' ';
} else {
raw_csv->delimiter = args->delimiter;
}
if(args->windowslinetermination) {
raw_csv->terminator = '\n';
} else {
raw_csv->terminator = args->lineterminator;
}
raw_csv->quotechar = args->quotechar;
if(raw_csv->quotechar != '\0') {
raw_csv->keepquotes = !args->quoting;
raw_csv->doublequote = args->doublequote;
} else {
raw_csv->keepquotes = true;
raw_csv->doublequote = false;
}
raw_csv->dayfirst = args->dayfirst;
//-----------------------------------------------------------------------------
// memory map in the data
void * map_data = NULL;
struct stat st;
int fd;
fd = open(args->file_path, O_RDONLY );
if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }
if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }
raw_csv->num_bytes = st.st_size;
map_data = mmap(0, raw_csv->num_bytes, PROT_READ, MAP_PRIVATE, fd, 0);
if (map_data == MAP_FAILED || raw_csv->num_bytes==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
//-----------------------------------------------------------------------------
//--- create a structure to hold variables used to parse the CSV data
error = updateRawCsv( (const char *)map_data, (long)raw_csv->num_bytes, raw_csv );
checkError(error, "call to updateRawCsv");
//-----------------------------------------------------------------------------
// find the record and fields points (in bitmaps)
error = launch_countRecords(raw_csv);
checkError(error, "call to record counter");
//-----------------------------------------------------------------------------
//-- Allocate space to hold the record starting point
RMM_TRY( RMM_ALLOC((void**)&(raw_csv->recStart), (sizeof(unsigned long long) * (raw_csv->num_records + 1)), 0) );
CUDA_TRY( hipMemset(raw_csv->d_num_records, 0, (sizeof(unsigned long long) )) ) ;
//-----------------------------------------------------------------------------
//-- Scan data and set the starting positions
error = launch_storeRecordStart(raw_csv);
checkError(error, "call to record initial position store");
// Previous kernel stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
// Currently, ignoring lineterminations within quotes is handled by recording
// the records of both, and then filtering out the records that are a quotechar
// or a linetermination within a quotechar pair. The future major refactoring
// of csv_reader and its kernels will probably use a different tactic.
if (raw_csv->quotechar != '\0') {
const size_t recTotalSize = sizeof(unsigned long long) * (raw_csv->num_records + 1);
unsigned long long *h_recStart = (unsigned long long*)malloc(recTotalSize);
CUDA_TRY( hipMemcpy(h_recStart, raw_csv->recStart, recTotalSize, hipMemcpyDeviceToHost) );
const char *h_data = (const char *)(map_data);
unsigned long long recCount = raw_csv->num_records;
bool quotation = false;
for (size_t i = 1; i < raw_csv->num_records; ++i) {
if (h_data[h_recStart[i] - 1] == raw_csv->quotechar) {
quotation = !quotation;
h_recStart[i] = raw_csv->num_bytes;
recCount--;
}
else if (quotation) {
h_recStart[i] = raw_csv->num_bytes;
recCount--;
}
}
CUDA_TRY( hipMemcpy(raw_csv->recStart, h_recStart, recTotalSize, hipMemcpyHostToDevice) );
thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
raw_csv->num_records = recCount;
free(h_recStart);
}
//-----------------------------------------------------------------------------
//-- Acquire the header row and determine the column names/count
int h_num_cols=0, h_dup_cols_removed=0;
int skip_header=0;
// Check if the user gave us a list of column names
if(args->names==NULL){
// Getting the first row of data from the file. We will parse the data to find lineterminator as
// well as the column delimiter.
char* cmap_data = (char *)map_data;
unsigned long long c=0;
raw_csv->header_row=0;
if (args->header>=0){
raw_csv->header_row = args->header;
}
if(raw_csv->header_row > (long)raw_csv->num_records){
checkError(GDF_FILE_ERROR, "Number of records is smaller than the id of the specified header row");
}
unsigned long long headerPositions[2];
CUDA_TRY( hipMemcpy(headerPositions,raw_csv->recStart + raw_csv->header_row, sizeof(unsigned long long)*2, hipMemcpyDeviceToHost));
unsigned long long start = headerPositions[0];
unsigned long long stop = headerPositions[1];
c=start;
while(c<stop){
if (cmap_data[c]==args->lineterminator){
h_num_cols++;
break;
}
else if(cmap_data[c] == '\r' && (c+1L)<(unsigned long long)raw_csv->num_bytes && cmap_data[c+1] == '\n'){
h_num_cols++;
break;
}else if (cmap_data[c]==args->delimiter)
h_num_cols++;
c++;
}
unsigned long long prev=0;
c=start;
raw_csv->col_names.clear();
if(args->header>=0){
h_num_cols=0;
// Storing the names of the columns into a vector of strings
while(c<=stop){
if (cmap_data[c]==args->delimiter || cmap_data[c]==args->lineterminator){
std::string colName(cmap_data +prev,c-prev );
prev=c+1;
raw_csv->col_names.push_back(colName);
h_num_cols++;
}
c++;
}
skip_header=1;
}else{
for (int i = 0; i<h_num_cols; i++){
std::string newColName = std::to_string(i);
raw_csv->col_names.push_back(newColName);
}
}
// Allocating a boolean array that will be used to state if a column needs to be read or filtered.
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols));
RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) );
for (int i = 0; i<h_num_cols; i++)
raw_csv->h_parseCol[i]=true;
// Looking for duplicates
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
bool found_dupe = false;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
found_dupe=true;
break;
}
}
if(found_dupe){
int count=1;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
if(args->mangle_dupe_cols){
// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
std::string newColName = *it2;
newColName += "." + std::to_string(count);
count++;
*it2 = newColName;
} else{
// All duplicate fields will be ignored.
int pos=std::distance(raw_csv->col_names.begin(), it2);
raw_csv->h_parseCol[pos]=false;
h_dup_cols_removed++;
}
}
}
}
}
raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed after removing duplicated fields
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), hipMemcpyHostToDevice));
}
else {
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols));
RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) );
for (int i = 0; i<raw_csv->num_actual_cols; i++){
raw_csv->h_parseCol[i]=true;
std::string col_name = args->names[i];
raw_csv->col_names.push_back(col_name);
}
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), hipMemcpyHostToDevice));
}
// User can give a subset of columns to parse, either by index (use_cols_int) or by name (use_cols_char)
if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
if(args->use_cols_int!=NULL){
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
for(int i=0; i < args->use_cols_int_len; i++){
int pos = args->use_cols_int[i];
raw_csv->h_parseCol[pos]=true;
}
raw_csv->num_active_cols = args->use_cols_int_len;
}else{
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
int countFound=0;
for(int i=0; i < args->use_cols_char_len; i++){
std::string colName(args->use_cols_char[i]);
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
if(colName==*it){
countFound++;
int pos=std::distance(raw_csv->col_names.begin(), it);
raw_csv->h_parseCol[pos]=true;
break;
}
}
}
raw_csv->num_active_cols = countFound;
}
CUDA_TRY(hipMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), hipMemcpyHostToDevice));
}
raw_csv->num_records -= (args->skiprows + args->skipfooter);
if(skip_header==0){
raw_csv->header_row=-1;
}else{
raw_csv->num_records-=1;
}
//-----------------------------------------------------------------------------
//--- done with host data
close(fd);
munmap(map_data, raw_csv->num_bytes);
//-----------------------------------------------------------------------------
//--- Auto detect types of the vectors
// if(args->dtype==NULL){
if(args->names==NULL){
column_data_t *d_ColumnData,*h_ColumnData;
h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols));
RMM_TRY( RMM_ALLOC((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) );
CUDA_TRY( hipMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ;
launch_dataTypeDetection(raw_csv, args->skiprows, d_ColumnData);
CUDA_TRY( hipMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), hipMemcpyDeviceToHost));
vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end)
raw_csv->dtypes.clear();
for(int col = 0; col < raw_csv->num_active_cols; col++){
unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;
if (h_ColumnData[col].countNULL == raw_csv->num_records){
d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
} else if(h_ColumnData[col].countString>0L){
d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings.
} else if(h_ColumnData[col].countDateAndTime>0L){
d_detectedTypes.push_back(GDF_DATE64);
} else if(h_ColumnData[col].countFloat > 0L ||
(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
// The second condition has been added to conform to PANDAS, which states that a column of
// integers with a single NULL record needs to be treated as floats.
d_detectedTypes.push_back(GDF_FLOAT64);
}
else {
d_detectedTypes.push_back(GDF_INT64);
}
}
raw_csv->dtypes=d_detectedTypes;
free(h_ColumnData);
RMM_TRY( RMM_FREE( d_ColumnData, 0 ) );
}
else{
for ( int x = 0; x < raw_csv->num_actual_cols; x++) {
std::string temp_type = args->dtype[x];
gdf_dtype col_dtype = convertStringToDtype( temp_type );
if (col_dtype == GDF_invalid)
return GDF_UNSUPPORTED_DTYPE;
raw_csv->dtypes.push_back(col_dtype);
}
}
//-----------------------------------------------------------------------------
//--- allocate space for the results
gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols);
void **d_data,**h_data;
gdf_valid_type **d_valid,**h_valid;
unsigned long long *d_valid_count,*h_valid_count;
gdf_dtype *d_dtypes,*h_dtypes;
h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols));
h_valid_count = (unsigned long long*)malloc ( sizeof(unsigned long long)* (raw_csv->num_active_cols));
h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols));
h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols));
RMM_TRY( RMM_ALLOC((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) );
CUDA_TRY( hipMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) );
int stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING)
stringColCount++;
}
string_pair **h_str_cols = NULL, **d_str_cols = NULL;
if (stringColCount > 0 ) {
h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount));
RMM_TRY( RMM_ALLOC((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) );
for (int col = 0; col < stringColCount; col++) {
RMM_TRY( RMM_ALLOC((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) );
}
CUDA_TRY(hipMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, hipMemcpyHostToDevice));
}
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1);
gdf->size = raw_csv->num_records;
gdf->dtype = raw_csv->dtypes[col];
gdf->null_count = 0; // will be filled in later
//--- column name
std::string str = raw_csv->col_names[col];
int len = str.length() + 1;
gdf->col_name = (char *)malloc(sizeof(char) * len);
memcpy(gdf->col_name, str.c_str(), len);
gdf->col_name[len -1] = '\0';
allocateGdfDataSpace(gdf);
cols[col] = gdf;
h_dtypes[col] = raw_csv->dtypes[col];
h_data[col] = gdf->data;
h_valid[col] = gdf->valid;
}
CUDA_TRY( hipMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
CUDA_TRY( hipMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
CUDA_TRY( hipMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), hipMemcpyHostToDevice));
free(h_dtypes);
free(h_valid);
free(h_data);
launch_dataConvertColumns(raw_csv,d_data, d_valid, d_dtypes,d_str_cols, args->skiprows, d_valid_count);
hipDeviceSynchronize();
stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = cols[col];
if (gdf->dtype != gdf_dtype::GDF_STRING)
continue;
NVStrings* const stringCol = NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records));
if ((raw_csv->quotechar != '\0') && (raw_csv->doublequote==true)) {
// In PANDAS, the default of enabling doublequote means that two consecutive
// quotechar in quoted fields are reduced to a single quotechar
std::string quotechar(1, raw_csv->quotechar); // build a one-character string; the address of a lone char is not NUL-terminated
std::string doublequotechar = quotechar + raw_csv->quotechar;
gdf->data = stringCol->replace(doublequotechar.c_str(), quotechar.c_str());
NVStrings::destroy(stringCol);
}
else {
gdf->data = stringCol;
}
RMM_TRY( RMM_FREE( h_str_cols [stringColCount], 0 ) );
stringColCount++;
}
CUDA_TRY( hipMemcpy(h_valid_count,d_valid_count, sizeof(unsigned long long) * (raw_csv->num_active_cols), hipMemcpyDeviceToHost));
//--- set the null count
for ( int col = 0; col < raw_csv->num_active_cols; col++) {
cols[col]->null_count = raw_csv->num_records - h_valid_count[col];
}
free(h_valid_count);
// free up space that is no longer needed
if (h_str_cols != NULL)
free ( h_str_cols);
free(raw_csv->h_parseCol);
if (d_str_cols != NULL)
RMM_TRY( RMM_FREE( d_str_cols, 0 ) );
RMM_TRY( RMM_FREE( d_valid, 0 ) );
RMM_TRY( RMM_FREE( d_valid_count, 0 ) );
RMM_TRY( RMM_FREE( d_dtypes, 0 ) );
RMM_TRY( RMM_FREE( d_data, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->recStart, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->d_parseCol, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->d_num_records, 0 ) );
CUDA_TRY( hipFree ( raw_csv->data) );
args->data = cols;
args->num_cols_out = raw_csv->num_active_cols;
args->num_rows_out = raw_csv->num_records;
delete raw_csv;
return error;
}
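//----------------------------------------------------------------------------------------------------------------
// Illustrative sketch only (not part of the original reader): one way a caller might drive read_csv(),
// using only the csv_read_arg members that read_csv() itself references above. Member types are assumed
// from that usage and the file name below is hypothetical.
static gdf_error example_read_csv_usage() {
const char *example_names[] = { "id", "price", "ts" };
const char *example_dtypes[] = { "int32", "float64", "date64" };
csv_read_arg args{};
args.file_path = "example.csv"; // hypothetical, uncompressed CSV file
args.num_cols = 3;
args.names = example_names;
args.dtype = example_dtypes;
args.delimiter = ',';
args.lineterminator = '\n';
args.quotechar = '"';
args.doublequote = true;
args.skiprows = 0;
args.skipfooter = 0;
args.dayfirst = false;
gdf_error err = read_csv(&args);
if (err == GDF_SUCCESS) {
// args.data now holds args.num_cols_out gdf_column pointers, each with args.num_rows_out rows
// and a populated null_count.
}
return err;
}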
/*
* The data type is passed in as a string; convert it into the matching gdf_dtype enum value
*/
gdf_dtype convertStringToDtype(std::string &dtype) {
if (dtype.compare( "str") == 0) return GDF_STRING;
if (dtype.compare( "date") == 0) return GDF_DATE64;
if (dtype.compare( "date32") == 0) return GDF_DATE32;
if (dtype.compare( "date64") == 0) return GDF_DATE64;
if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
if (dtype.compare( "category") == 0) return GDF_CATEGORY;
if (dtype.compare( "float") == 0) return GDF_FLOAT32;
if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
if (dtype.compare( "double") == 0) return GDF_FLOAT64;
if (dtype.compare( "short") == 0) return GDF_INT16;
if (dtype.compare( "int") == 0) return GDF_INT32;
if (dtype.compare( "int32") == 0) return GDF_INT32;
if (dtype.compare( "int64") == 0) return GDF_INT64;
if (dtype.compare( "long") == 0) return GDF_INT64;
return GDF_invalid;
}
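// e.g. convertStringToDtype() maps "float64" to GDF_FLOAT64; an unrecognized string maps to GDF_invalid,
// which read_csv() turns into a GDF_UNSUPPORTED_DTYPE error.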
/*
* Create the raw_csv_t structure and allocate space on the GPU
*/
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * raw ) {
int num_bits = (num_bytes + 63) / 64;
CUDA_TRY( hipMallocManaged ((void**)&raw->data, (sizeof(char) * num_bytes)));
// RMM_TRY( RMM_ALLOC((void**)&raw->data, (sizeof(char) * num_bytes),0 ));
RMM_TRY( RMM_ALLOC((void**)&raw->d_num_records, sizeof(unsigned long long),0) );
CUDA_TRY( hipMemcpy(raw->data, data, num_bytes, hipMemcpyHostToDevice));
CUDA_TRY( hipMemset(raw->d_num_records,0, ((sizeof(long)) )) );
raw->num_bits = num_bits;
return GDF_SUCCESS;
}
/*
* For each of the gdf_columns, create the on-device space. The on-host fields should already be filled in
*/
gdf_error allocateGdfDataSpace(gdf_column *gdf) {
long N = gdf->size;
long num_bitmaps = (N + 31) / 8; // one valid bit per row: roughly N/8 bytes, rounded up with a few bytes of slack
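// e.g. N = 100 rows -> (100 + 31) / 8 = 16 bytes of valid bits, i.e. 128 bits, comfortably covering the 100 rows.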
//--- allocate space for the valid bitmaps
RMM_TRY( RMM_ALLOC((void**)&gdf->valid, (sizeof(gdf_valid_type) * num_bitmaps), 0) );
CUDA_TRY(hipMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) );
int elementSize=0;
//--- Allocate space for the data
switch(gdf->dtype) {
case gdf_dtype::GDF_INT8:
elementSize = sizeof(int8_t);
break;
case gdf_dtype::GDF_INT16:
elementSize = sizeof(int16_t);
break;
case gdf_dtype::GDF_INT32:
elementSize = sizeof(int32_t);
break;
case gdf_dtype::GDF_INT64:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_FLOAT32:
elementSize = sizeof(float);
break;
case gdf_dtype::GDF_FLOAT64:
elementSize = sizeof(double);
break;
case gdf_dtype::GDF_DATE32:
elementSize = sizeof(gdf_date32);
break;
case gdf_dtype::GDF_DATE64:
elementSize = sizeof(gdf_date64);
break;
case gdf_dtype::GDF_TIMESTAMP:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_CATEGORY:
elementSize = sizeof(gdf_category);
break;
case gdf_dtype::GDF_STRING:
return gdf_error::GDF_SUCCESS;
// Memory for gdf->data allocated by string class eventually
default:
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY( RMM_ALLOC((void**)&gdf->data, elementSize * N, 0) );
return gdf_error::GDF_SUCCESS;
}
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_countRecords(raw_csv_t * csvData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, countRecords) );
// Calculate actual block count to use based on bitmap count
// Each bitmap is for a 64-byte chunk, and each data index is bitmap ID * 64
int gridSize = (csvData->num_bits + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( countRecords) , dim3(gridSize), dim3(blockSize) , 0, 0,
csvData->data, csvData->terminator, csvData->quotechar,
csvData->num_bytes, csvData->num_bits, csvData->d_num_records
);
CUDA_TRY(hipGetLastError());
long recs=-1;
CUDA_TRY(hipMemcpy(&recs, csvData->d_num_records, sizeof(long), hipMemcpyDeviceToHost));
csvData->num_records=recs;
CUDA_TRY(hipGetLastError());
return GDF_SUCCESS;
}
__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records) {
// thread IDs range per block, so also need the block id
long tid = threadIdx.x + (blockDim.x * blockIdx.x);
if (tid >= num_bits)
return;
// data ID is a multiple of 64
long did = tid * 64L;
char *raw = (data + did);
long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
// process the data
long tokenCount = 0;
for (long x = 0; x < byteToProcess; x++) {
// Scan and log records. If quotations are enabled, then also log quotes
// for a postprocess ignore, as the chunk here has limited visibility.
if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) {
tokenCount++;
} else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') {
x++;
tokenCount++;
}
}
atomicAdd((unsigned long long int*)num_records,(unsigned long long int)tokenCount);
}
gdf_error launch_storeRecordStart(raw_csv_t * csvData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, storeRecordStart) );
// Calculate actual block count to use based on bitmap count
// Each bitmap is for a 64-byte chunk, and each data index is bitmap ID * 64
int gridSize = (csvData->num_bits + blockSize - 1) / blockSize;
hipLaunchKernelGGL(( storeRecordStart) , dim3(gridSize), dim3(blockSize) , 0, 0,
csvData->data, csvData->terminator, csvData->quotechar,
csvData->num_bytes, csvData->num_bits, csvData->d_num_records,
csvData->recStart
);
CUDA_TRY( hipGetLastError() );
return GDF_SUCCESS;
}
__global__ void storeRecordStart(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) {
// thread IDs range per block, so also need the block id
long tid = threadIdx.x + (blockDim.x * blockIdx.x);
if ( tid >= num_bits)
return;
// data ID - multiple of 64
long did = tid * 64L;
char *raw = (data + did);
long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
if(tid==0){
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+0;
}
// process the data
for (long x = 0; x < byteToProcess; x++) {
// Scan and log records. If quotations are enabled, then also log quotes
// for a postprocess ignore, as the chunk here has limited visibility.
if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) {
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+x+1;
} else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') {
x++;
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+x+1;
}
}
}
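// Note that recStart receives record-start offsets and, when quoting is enabled, quotechar positions as well;
// read_csv() later sorts the array and filters out the quote-related entries on the host.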
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, long row_offset, unsigned long long *num_valid) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, convertCsvToGdf) );
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
parsing_opts_t opts;
opts.delimiter = raw_csv->delimiter;
opts.terminator = raw_csv->terminator;
opts.quotechar = raw_csv->quotechar;
opts.keepquotes = raw_csv->keepquotes;
hipLaunchKernelGGL(( convertCsvToGdf) , dim3(gridSize), dim3(blockSize) , 0, 0,
raw_csv->data,
opts,
raw_csv->num_records,
raw_csv->num_actual_cols,
raw_csv->d_parseCol,
raw_csv->recStart,
d_dtypes,
gdf,
valid,
str_cols,
row_offset,
raw_csv->header_row,
raw_csv->dayfirst,
num_valid
);
CUDA_TRY( hipGetLastError() );
return GDF_SUCCESS;
}
/*
* Data is processed one row/record at a time, so the total number of threads (tid) equals the number of rows.
*
*/
__global__ void convertCsvToGdf(
char *raw_csv,
const parsing_opts_t opts,
unsigned long long num_records,
int num_columns,
bool *parseCol,
unsigned long long *recStart,
gdf_dtype *dtype,
void **gdf_data,
gdf_valid_type **valid,
string_pair **str_cols,
unsigned long long row_offset,
long header_row,
bool dayfirst,
unsigned long long *num_valid
)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long extraOff=0;
if(rec_id>=header_row && header_row>=0)
extraOff=1;
long start = recStart[rec_id + row_offset + extraOff];
long stop = recStart[rec_id + 1 + row_offset + extraOff];
long pos = start;
int col = 0;
int actual_col = 0;
int stringCol = 0;
bool quotation = false;
while(col<num_columns){
if(start>stop)
break;
while(true){
// Use simple logic to ignore control chars between any quote seq
// Handles nominal cases including doublequotes within quotes, but
// may not output exact failures as PANDAS for malformed fields
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
if(parseCol[col]==true){
long tempPos=pos-1;
if(dtype[col] != gdf_dtype::GDF_CATEGORY && dtype[col] != gdf_dtype::GDF_STRING){
removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
}
if(start<=(tempPos)) { // Empty strings are not legal values
switch(dtype[col]) {
case gdf_dtype::GDF_INT8:
{
int8_t *gdf_out = (int8_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT16: {
int16_t *gdf_out = (int16_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT32:
{
int32_t *gdf_out = (int32_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT64:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT32:
{
float *gdf_out = (float *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT64:
{
double *gdf_out = (double *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_DATE32:
{
gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_DATE64:
{
gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_TIMESTAMP:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_CATEGORY:
{
gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED);
}
break;
case gdf_dtype::GDF_STRING:
{
long end = pos;
if(opts.keepquotes==false){
if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){
start++;
end--;
}
}
str_cols[stringCol][rec_id].first = raw_csv+start;
str_cols[stringCol][rec_id].second = size_t(end-start);
stringCol++;
}
break;
default:
break;
}
// set the valid bitmap - all bits were set to 0 to start
int bitmapIdx = whichBitmap(rec_id); // which bitmap
int bitIdx = whichBit(rec_id); // which bit - over an 8-bit index
setBit(valid[col]+bitmapIdx, bitIdx); // This is done with atomics
atomicAdd((unsigned long long int*)&num_valid[col],(unsigned long long int)1);
}
else if(dtype[col]==gdf_dtype::GDF_STRING){
str_cols[stringCol][rec_id].first = NULL;
str_cols[stringCol][rec_id].second = 0;
stringCol++;
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
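// String columns are not materialized here: the kernel only records (pointer, length) pairs in str_cols,
// which read_csv() later converts into NVStrings objects via NVStrings::create_from_index().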
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_dataTypeDetection(
raw_csv_t * raw_csv,
long row_offset,
column_data_t* d_columnData)
{
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dataTypeDetection) );
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
parsing_opts_t opts;
opts.delimiter = raw_csv->delimiter;
opts.terminator = raw_csv->terminator;
opts.quotechar = raw_csv->quotechar;
opts.keepquotes = raw_csv->keepquotes;
hipLaunchKernelGGL(( dataTypeDetection) , dim3(gridSize), dim3(blockSize) , 0, 0,
raw_csv->data,
opts,
raw_csv->num_records,
raw_csv->num_actual_cols,
raw_csv->d_parseCol,
raw_csv->recStart,
row_offset,
raw_csv->header_row,
d_columnData
);
CUDA_TRY( hipGetLastError() );
return GDF_SUCCESS;
}
/*
* Per-record kernel: for each active column, tally how many fields look like integers, floats,
* date-times, strings, or NULLs; read_csv() uses these counts to auto-detect the column dtypes.
*/
__global__ void dataTypeDetection(
char *raw_csv,
const parsing_opts_t opts,
unsigned long long num_records,
int num_columns,
bool *parseCol,
unsigned long long *recStart,
unsigned long long row_offset,
long header_row,
column_data_t* d_columnData
)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long extraOff=0;
if(rec_id>=header_row && header_row>=0)
extraOff=1;
long start = recStart[rec_id + row_offset + extraOff];
long stop = recStart[rec_id + 1 + row_offset + extraOff];
long pos = start;
int col = 0;
int actual_col = 0;
bool quotation = false;
// Going through all the columns of a given record
while(col<num_columns){
if(start>stop)
break;
// Finding the breaking point for each column
while(true){
// Use simple logic to ignore control chars between any quote seq
// Handles nominal cases including doublequotes within quotes, but
// may not output exact failures as PANDAS for malformed fields
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
// Checking if this is a column that the user wants --- user can filter columns
if(parseCol[col]==true){
long tempPos=pos-1;
// Checking if the record is NULL
if(start>(tempPos)){
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
pos++;
start=pos;
col++;
actual_col++;
continue;
}
long countNumber=0;
long countDecimal=0;
long countSlash=0;
long countDash=0;
long countColon=0;
long countString=0;
long strLen=pos-start;
// Remove all pre and post white-spaces. We might find additional NULL fields if the entire entry is made up of only spaces.
removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
for(long startPos=start; startPos<=tempPos; startPos++){
if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (raw_csv[startPos]){
case '.':
countDecimal++;break;
case '-':
countDash++; break;
case '/':
countSlash++;break;
case ':':
countColon++;break;
default:
countString++;
break;
}
}
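// Illustrative examples of how the tallies classify a trimmed field:
// "-123" -> digits plus a leading '-' -> integer (sized below; this one fits countInt8)
// "3.14" -> digits plus a single '.' -> countFloat
// "2018-12-31" -> digits with two '-' and at most two ':' -> countDateAndTime
// "hello" -> more than 3 non-numeric characters -> countString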
if(strLen==0) // Removed spaces ' ' in the pre-processing and thus we can have an empty string.
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
// For an integer, the digit count must equal the string length, or be one less when the value starts with a minus sign
else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){
// Checking to see if the integer value requires 8, 16, 32, or 64 bits.
// This will allow us to allocate the exact amount of memory.
int64_t i = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
if(i >= (1L<<31)){
atomicAdd(& d_columnData[actual_col].countInt64, 1L);
}
else if(i >= (1L<<15)){
atomicAdd(& d_columnData[actual_col].countInt32, 1L);
}
else if(i >= (1L<<7)){
atomicAdd(& d_columnData[actual_col].countInt16, 1L);
}
else
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
// Floating point numbers are made up of numerical strings, have to have a decimal sign, and can have a minus sign.
else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){
atomicAdd(& d_columnData[actual_col].countFloat, 1L);
}
// The date-time field cannot have more than 3 string characters. As such, if an entry has more than 3 string
// characters, it is not a date-time field. Also, if a string has multiple decimal points, then it is not a legitimate number.
else if(countString > 3 || countDecimal > 1){
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
else {
// A date field can have either one or two '-' or '/'. A legal combination will only have one of the two characters.
// To simplify the process of auto column detection, we are not covering all the date-time formation permutations.
if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
if((countColon<=2)){
atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
// Default field is string type.
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
//----------------------------------------------------------------------------------------------------------------
/*
* Return the offset of the x-th set bit
* x is the occurrence: 1 = first, 2 = second, ...
*/
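// Example: with r_bits[tid] = 0b10110 (bits 1, 2 and 4 set) and x = 2, the second set bit is bit 2, so 2 is returned.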
__device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) {
int idx = tid;
if ( x == 0 )
return -1;
int withinBitCount = 0;
int offset = 0;
int found = 0;
uint64_t bitmap = r_bits[idx];
while (found != x)
{
if(bitmap == 0)
{
idx++;
if (idx >= num_bits)
return -1;
bitmap = r_bits[idx];
offset += 64;
withinBitCount = 0;
}
if ( bitmap & 1 ) {
found++; //found a set bit
}
bitmap >>= 1;
++withinBitCount;
}
offset += withinBitCount -1;
return offset;
}
| cd978c773322186fda764a347120b763806b0199.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @file csv-reader.cu code to read csv data
*
* CSV Reader
*/
#include <cuda_runtime.h>
#include <iostream>
#include <vector>
#include <string>
#include <stdio.h>
#include <iostream>
#include <iomanip>
#include <vector>
#include <unordered_map>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <thrust/scan.h>
#include <thrust/reduce.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/host_vector.h>
#include "type_conversion.cuh"
#include "datetime_parser.cuh"
#include "cudf.h"
#include "utilities/error_utils.h"
#include "rmm/rmm.h"
#include "NVStrings.h"
constexpr int32_t HASH_SEED = 33;
using namespace std;
//-- define the structure for raw data handling - for internal use
typedef struct raw_csv_ {
char * data; // on-device: the raw unprocessed CSV data - loaded as a large char * array
unsigned long long* d_num_records; // on-device: Number of records.
unsigned long long* recStart; // on-device: Starting position of the records.
char delimiter; // host: the delimiter
char terminator; // host: the line terminator
char quotechar; // host: the quote character
bool keepquotes; // host: indicates to keep the start and end quotechar
bool doublequote; // host: indicates to interpret two consecutive quotechar as a single quotechar
long num_bytes; // host: the number of bytes in the data
long num_bits; // host: the number of 64-bit bitmaps (different than valid)
unsigned long long num_records; // host: number of records (per column)
// int num_cols; // host: number of columns
int num_active_cols; // host: number of columns that will be returned to the user.
int num_actual_cols; // host: number of columns in the file --- based on the number of columns in header
vector<gdf_dtype> dtypes; // host: array of dtypes (since gdf_columns are not created until end)
vector<string> col_names; // host: array of column names
bool* h_parseCol; // host : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
bool* d_parseCol; // device : array of booleans stating if column should be parsed in reading process: parseCol[x]=false means that the column x needs to be filtered out.
long header_row; // Row id of the header
bool dayfirst;
} raw_csv_t;
typedef struct column_data_ {
unsigned long long countFloat;
unsigned long long countDateAndTime;
unsigned long long countString;
unsigned long long countInt8;
unsigned long long countInt16;
unsigned long long countInt32;
unsigned long long countInt64;
unsigned long long countNULL;
} column_data_t;
typedef struct parsing_opts_ {
char delimiter;
char terminator;
char quotechar;
bool keepquotes;
} parsing_opts_t;
using string_pair = std::pair<const char*,size_t>;
//
//---------------create and process ---------------------------------------------
//
gdf_error parseArguments(csv_read_arg *args, raw_csv_t *csv);
// gdf_error getColNamesAndTypes(const char **col_names, const char **dtypes, raw_csv_t *d);
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * csvData );
gdf_error allocateGdfDataSpace(gdf_column *);
gdf_dtype convertStringToDtype(std::string &dtype);
#define checkError(error, txt) if ( error != GDF_SUCCESS) { cerr << "ERROR: " << error << " in " << txt << endl; return error; }
//
//---------------CUDA Kernel ---------------------------------------------
//
__device__ int findSetBit(int tid, long num_bits, uint64_t *f_bits, int x);
gdf_error launch_countRecords(raw_csv_t * csvData);
gdf_error launch_storeRecordStart(raw_csv_t * csvData);
gdf_error launch_dataConvertColumns(raw_csv_t * raw_csv, void** d_gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes, string_pair **str_cols, long row_offset, unsigned long long *);
gdf_error launch_dataTypeDetection(raw_csv_t * raw_csv, long row_offset, column_data_t* d_columnData);
__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records);
__global__ void storeRecordStart(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) ;
__global__ void convertCsvToGdf(char *csv, const parsing_opts_t opts, unsigned long long num_records, int num_columns,bool *parseCol,unsigned long long *recStart,gdf_dtype *dtype,void **gdf_data,gdf_valid_type **valid,string_pair **str_cols,unsigned long long row_offset, long header_row,bool dayfirst,unsigned long long *num_valid);
__global__ void dataTypeDetection(char *raw_csv, const parsing_opts_t opts, unsigned long long num_records, int num_columns, bool *parseCol, unsigned long long *recStart, unsigned long long row_offset, long header_row, column_data_t* d_columnData);
//
//---------------CUDA Valid (8 blocks of 8-bits) Bitmap Kernels ---------------------------------------------
//
__device__ int whichBitmap(int record) { return (record/8); }
__device__ int whichBit(int bit) { return (bit % 8); }
__inline__ __device__ void validAtomicOR(gdf_valid_type* address, gdf_valid_type val)
{
int32_t *base_address = (int32_t*)((gdf_valid_type*)address - ((size_t)address & 3));
int32_t int_val = (int32_t)val << (((size_t) address & 3) * 8);
atomicOr(base_address, int_val);
}
__device__ void setBit(gdf_valid_type* address, int bit) {
gdf_valid_type bitMask[8] = {1, 2, 4, 8, 16, 32, 64, 128};
validAtomicOR(address, bitMask[bit]);
}
std::string stringType(gdf_dtype dt){
switch (dt){
case GDF_STRING: return std::string("str");
case GDF_DATE64: return std::string("date64");
case GDF_CATEGORY: return std::string("category");
case GDF_FLOAT64: return std::string("float64");
case GDF_INT8: return std::string("int8");
case GDF_INT16: return std::string("int16");
case GDF_INT32: return std::string("int32");
case GDF_INT64: return std::string("int64");
default:
return "long";
}
}
/**
* @brief read in a CSV file
*
* Read in a CSV file, extract all fields, and return a GDF (array of gdf_columns)
*
* @param[in,out] args the input arguments; this also contains the returned data
*
* Arguments:
*
* Required Arguments
* file_path - file location to read from - currently the file cannot be compressed
* num_cols - number of columns in the names and dtype arrays
* names - ordered List of column names, this is a required field
* dtype - ordered List of data types, this is required
*
* Optional
* lineterminator - define the line terminator character. Default is '\n'
* delimiter - define the field separator, default is ','. This argument is also called 'sep'
*
* quotechar; define the character used to denote start and end of a quoted item
* quoting; treat string fields as quoted item and remove the first and last quotechar
* nodoublequote; do not interpret two consecutive quotechar as a single quotechar
*
* delim_whitespace - use white space as the delimiter - default is false. This overrides the delimiter argument
* skipinitialspace - skip white spaces after the delimiter - default is false
*
* skiprows - number of rows at the start of the files to skip, default is 0
* skipfooter - number of rows at the bottom of the file to skip - default is 0
*
* dayfirst - is the first value the day? DD/MM versus MM/DD
*
*
* Output
* num_cols_out - Out: return the number of columns read in
* num_rows_out - Out: return the number of rows read in
* gdf_column **data - Out: return the array of *gdf_columns
*
*
* @return gdf_error
*
*/
gdf_error read_csv(csv_read_arg *args)
{
gdf_error error = gdf_error::GDF_SUCCESS;
//-----------------------------------------------------------------------------
// create the CSV data structure - this will be filled in as the CSV data is processed.
// Done first to validate data types
raw_csv_t * raw_csv = new raw_csv_t;
// error = parseArguments(args, raw_csv);
raw_csv->num_actual_cols = args->num_cols;
raw_csv->num_active_cols = args->num_cols;
raw_csv->num_records = 0;
if(args->delim_whitespace == true) {
raw_csv->delimiter = ' ';
} else {
raw_csv->delimiter = args->delimiter;
}
if(args->windowslinetermination) {
raw_csv->terminator = '\n';
} else {
raw_csv->terminator = args->lineterminator;
}
raw_csv->quotechar = args->quotechar;
if(raw_csv->quotechar != '\0') {
raw_csv->keepquotes = !args->quoting;
raw_csv->doublequote = args->doublequote;
} else {
raw_csv->keepquotes = true;
raw_csv->doublequote = false;
}
raw_csv->dayfirst = args->dayfirst;
//-----------------------------------------------------------------------------
// memory map in the data
void * map_data = NULL;
struct stat st;
int fd;
fd = open(args->file_path, O_RDONLY );
if (fd < 0) { close(fd); checkError(GDF_FILE_ERROR, "Error opening file"); }
if (fstat(fd, &st)) { close(fd); checkError(GDF_FILE_ERROR, "cannot stat file"); }
raw_csv->num_bytes = st.st_size;
map_data = mmap(0, raw_csv->num_bytes, PROT_READ, MAP_PRIVATE, fd, 0);
if (map_data == MAP_FAILED || raw_csv->num_bytes==0) { close(fd); checkError(GDF_C_ERROR, "Error mapping file"); }
//-----------------------------------------------------------------------------
//--- create a structure to hold variables used to parse the CSV data
error = updateRawCsv( (const char *)map_data, (long)raw_csv->num_bytes, raw_csv );
checkError(error, "call to updateRawCsv");
//-----------------------------------------------------------------------------
// find the record and fields points (in bitmaps)
error = launch_countRecords(raw_csv);
checkError(error, "call to record counter");
//-----------------------------------------------------------------------------
//-- Allocate space to hold the record starting point
RMM_TRY( RMM_ALLOC((void**)&(raw_csv->recStart), (sizeof(unsigned long long) * (raw_csv->num_records + 1)), 0) );
CUDA_TRY( cudaMemset(raw_csv->d_num_records, 0, (sizeof(unsigned long long) )) ) ;
//-----------------------------------------------------------------------------
//-- Scan data and set the starting positions
error = launch_storeRecordStart(raw_csv);
checkError(error, "call to record initial position store");
// Previous kernel stores the record positions as encountered by all threads
// Sort the record positions as subsequent processing may require filtering
// certain rows or other processing on specific records
thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
// Currently, ignoring lineterminations within quotes is handled by recording
// the records of both, and then filtering out the records that are a quotechar
// or a linetermination within a quotechar pair. The future major refactoring
// of csv_reader and its kernels will probably use a different tactic.
if (raw_csv->quotechar != '\0') {
const size_t recTotalSize = sizeof(unsigned long long) * (raw_csv->num_records + 1);
unsigned long long *h_recStart = (unsigned long long*)malloc(recTotalSize);
CUDA_TRY( cudaMemcpy(h_recStart, raw_csv->recStart, recTotalSize, cudaMemcpyDeviceToHost) );
const char *h_data = (const char *)(map_data);
unsigned long long recCount = raw_csv->num_records;
bool quotation = false;
for (size_t i = 1; i < raw_csv->num_records; ++i) {
if (h_data[h_recStart[i] - 1] == raw_csv->quotechar) {
quotation = !quotation;
h_recStart[i] = raw_csv->num_bytes;
recCount--;
}
else if (quotation) {
h_recStart[i] = raw_csv->num_bytes;
recCount--;
}
}
CUDA_TRY( cudaMemcpy(raw_csv->recStart, h_recStart, recTotalSize, cudaMemcpyHostToDevice) );
thrust::sort(thrust::device, raw_csv->recStart, raw_csv->recStart + raw_csv->num_records + 1);
raw_csv->num_records = recCount;
free(h_recStart);
}
//-----------------------------------------------------------------------------
//-- Acquire the header row and determine the column names/count
int h_num_cols=0, h_dup_cols_removed=0;
int skip_header=0;
// Check if the user gave us a list of column names
if(args->names==NULL){
// Getting the first row of data from the file. We will parse the data to find lineterminator as
// well as the column delimiter.
char* cmap_data = (char *)map_data;
unsigned long long c=0;
raw_csv->header_row=0;
if (args->header>=0){
raw_csv->header_row = args->header;
}
if(raw_csv->header_row > (long)raw_csv->num_records){
checkError(GDF_FILE_ERROR, "Number of records is smaller than the id of the specified header row");
}
unsigned long long headerPositions[2];
CUDA_TRY( cudaMemcpy(headerPositions,raw_csv->recStart + raw_csv->header_row, sizeof(unsigned long long)*2, cudaMemcpyDeviceToHost));
unsigned long long start = headerPositions[0];
unsigned long long stop = headerPositions[1];
c=start;
while(c<stop){
if (cmap_data[c]==args->lineterminator){
h_num_cols++;
break;
}
else if(cmap_data[c] == '\r' && (c+1L)<(unsigned long long)raw_csv->num_bytes && cmap_data[c+1] == '\n'){
h_num_cols++;
break;
}else if (cmap_data[c]==args->delimiter)
h_num_cols++;
c++;
}
unsigned long long prev=0;
c=start;
raw_csv->col_names.clear();
if(args->header>=0){
h_num_cols=0;
// Storing the names of the columns into a vector of strings
while(c<=stop){
if (cmap_data[c]==args->delimiter || cmap_data[c]==args->lineterminator){
std::string colName(cmap_data +prev,c-prev );
prev=c+1;
raw_csv->col_names.push_back(colName);
h_num_cols++;
}
c++;
}
skip_header=1;
}else{
for (int i = 0; i<h_num_cols; i++){
std::string newColName = std::to_string(i);
raw_csv->col_names.push_back(newColName);
}
}
// Allocating a boolean array that will be used to state if a column needs to be read or filtered.
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (h_num_cols));
RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (h_num_cols)),0 ) );
for (int i = 0; i<h_num_cols; i++)
raw_csv->h_parseCol[i]=true;
// Looking for duplicates
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
bool found_dupe = false;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
found_dupe=true;
break;
}
}
if(found_dupe){
int count=1;
for (auto it2 = (it+1); it2 != raw_csv->col_names.end(); it2++){
if (*it==*it2){
if(args->mangle_dupe_cols){
// Replace all the duplicates of column X with X.1,X.2,... First appearance stays as X.
std::string newColName = *it2;
newColName += "." + std::to_string(count);
count++;
*it2 = newColName;
} else{
// All duplicate fields will be ignored.
int pos=std::distance(raw_csv->col_names.begin(), it2);
raw_csv->h_parseCol[pos]=false;
h_dup_cols_removed++;
}
}
}
}
}
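// Example: a header of [A, B, A, A] with mangle_dupe_cols becomes [A, B, A.1, A.2];
// without mangle_dupe_cols the third and fourth columns are marked false in h_parseCol and skipped.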
raw_csv->num_actual_cols = h_num_cols; // Actual number of columns in the CSV file
raw_csv->num_active_cols = h_num_cols-h_dup_cols_removed; // Number of fields that need to be processed after removing duplicate fields
CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (h_num_cols), cudaMemcpyHostToDevice));
}
else {
raw_csv->h_parseCol = (bool*)malloc(sizeof(bool) * (args->num_cols));
RMM_TRY( RMM_ALLOC((void**)&raw_csv->d_parseCol,(sizeof(bool) * (args->num_cols)),0 ) );
for (int i = 0; i<raw_csv->num_actual_cols; i++){
raw_csv->h_parseCol[i]=true;
std::string col_name = args->names[i];
raw_csv->col_names.push_back(col_name);
}
CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (args->num_cols), cudaMemcpyHostToDevice));
}
// User can restrict which columns are parsed, either by index (use_cols_int) or by name (use_cols_char)
if (args->use_cols_int!=NULL || args->use_cols_char!=NULL){
if(args->use_cols_int!=NULL){
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
for(int i=0; i < args->use_cols_int_len; i++){
int pos = args->use_cols_int[i];
raw_csv->h_parseCol[pos]=true;
}
raw_csv->num_active_cols = args->use_cols_int_len;
}else{
for (int i = 0; i<raw_csv->num_actual_cols; i++)
raw_csv->h_parseCol[i]=false;
int countFound=0;
for(int i=0; i < args->use_cols_char_len; i++){
std::string colName(args->use_cols_char[i]);
for (auto it = raw_csv->col_names.begin(); it != raw_csv->col_names.end(); it++){
if(colName==*it){
countFound++;
int pos=std::distance(raw_csv->col_names.begin(), it);
raw_csv->h_parseCol[pos]=true;
break;
}
}
}
raw_csv->num_active_cols = countFound;
}
CUDA_TRY(cudaMemcpy(raw_csv->d_parseCol, raw_csv->h_parseCol, sizeof(bool) * (raw_csv->num_actual_cols), cudaMemcpyHostToDevice));
}
raw_csv->num_records -= (args->skiprows + args->skipfooter);
if(skip_header==0){
raw_csv->header_row=-1;
}else{
raw_csv->num_records-=1;
}
//-----------------------------------------------------------------------------
//--- done with host data
close(fd);
munmap(map_data, raw_csv->num_bytes);
//-----------------------------------------------------------------------------
//--- Auto detect types of the vectors
// if(args->dtype==NULL){
if(args->names==NULL){
column_data_t *d_ColumnData,*h_ColumnData;
h_ColumnData = (column_data_t*)malloc(sizeof(column_data_t) * (raw_csv->num_active_cols));
RMM_TRY( RMM_ALLOC((void**)&d_ColumnData,(sizeof(column_data_t) * (raw_csv->num_active_cols)),0 ) );
CUDA_TRY( cudaMemset(d_ColumnData, 0, (sizeof(column_data_t) * (raw_csv->num_active_cols)) ) ) ;
launch_dataTypeDetection(raw_csv, args->skiprows, d_ColumnData);
CUDA_TRY( cudaMemcpy(h_ColumnData,d_ColumnData, sizeof(column_data_t) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost));
vector<gdf_dtype> d_detectedTypes; // host: array of dtypes (since gdf_columns are not created until end)
raw_csv->dtypes.clear();
for(int col = 0; col < raw_csv->num_active_cols; col++){
unsigned long long countInt = h_ColumnData[col].countInt8+h_ColumnData[col].countInt16+
h_ColumnData[col].countInt32+h_ColumnData[col].countInt64;
if (h_ColumnData[col].countNULL == raw_csv->num_records){
d_detectedTypes.push_back(GDF_INT8); // Entire column is NULL. Allocating the smallest amount of memory
} else if(h_ColumnData[col].countString>0L){
d_detectedTypes.push_back(GDF_CATEGORY); // For auto-detection, we are currently not supporting strings.
} else if(h_ColumnData[col].countDateAndTime>0L){
d_detectedTypes.push_back(GDF_DATE64);
} else if(h_ColumnData[col].countFloat > 0L ||
(h_ColumnData[col].countFloat==0L && countInt >0L && h_ColumnData[col].countNULL >0L) ) {
// The second condition has been added to conform to PANDAS, which states that a column of
// integers with a single NULL record needs to be treated as floats.
d_detectedTypes.push_back(GDF_FLOAT64);
}
else {
d_detectedTypes.push_back(GDF_INT64);
}
}
raw_csv->dtypes=d_detectedTypes;
free(h_ColumnData);
RMM_TRY( RMM_FREE( d_ColumnData, 0 ) );
}
else{
for ( int x = 0; x < raw_csv->num_actual_cols; x++) {
std::string temp_type = args->dtype[x];
gdf_dtype col_dtype = convertStringToDtype( temp_type );
if (col_dtype == GDF_invalid)
return GDF_UNSUPPORTED_DTYPE;
raw_csv->dtypes.push_back(col_dtype);
}
}
//-----------------------------------------------------------------------------
//--- allocate space for the results
gdf_column **cols = (gdf_column **)malloc( sizeof(gdf_column *) * raw_csv->num_active_cols);
void **d_data,**h_data;
gdf_valid_type **d_valid,**h_valid;
unsigned long long *d_valid_count,*h_valid_count;
gdf_dtype *d_dtypes,*h_dtypes;
h_dtypes = (gdf_dtype*)malloc ( sizeof(gdf_dtype)* (raw_csv->num_active_cols));
h_valid_count = (unsigned long long*)malloc ( sizeof(unsigned long long)* (raw_csv->num_active_cols));
h_data = (void**)malloc ( sizeof(void*)* (raw_csv->num_active_cols));
h_valid = (gdf_valid_type**)malloc ( sizeof(gdf_valid_type*)* (raw_csv->num_active_cols));
RMM_TRY( RMM_ALLOC((void**)&d_dtypes, (sizeof(gdf_dtype) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_data, (sizeof(void *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_valid, (sizeof(gdf_valid_type *) * raw_csv->num_active_cols), 0 ) );
RMM_TRY( RMM_ALLOC((void**)&d_valid_count, (sizeof(unsigned long long) * raw_csv->num_active_cols), 0 ) );
CUDA_TRY( cudaMemset(d_valid_count, 0, (sizeof(unsigned long long) * raw_csv->num_active_cols)) );
int stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
if(raw_csv->dtypes[col]==gdf_dtype::GDF_STRING)
stringColCount++;
}
string_pair **h_str_cols = NULL, **d_str_cols = NULL;
if (stringColCount > 0 ) {
h_str_cols = (string_pair**) malloc ((sizeof(string_pair *) * stringColCount));
RMM_TRY( RMM_ALLOC((void**)&d_str_cols, (sizeof(string_pair *) * stringColCount), 0) );
for (int col = 0; col < stringColCount; col++) {
RMM_TRY( RMM_ALLOC((void**)(h_str_cols + col), sizeof(string_pair) * (raw_csv->num_records), 0) );
}
CUDA_TRY(cudaMemcpy(d_str_cols, h_str_cols, sizeof(string_pair *) * stringColCount, cudaMemcpyHostToDevice));
}
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = (gdf_column *)malloc(sizeof(gdf_column) * 1);
gdf->size = raw_csv->num_records;
gdf->dtype = raw_csv->dtypes[col];
gdf->null_count = 0; // will be filled in later
//--- column name
std::string str = raw_csv->col_names[col];
int len = str.length() + 1;
gdf->col_name = (char *)malloc(sizeof(char) * len);
memcpy(gdf->col_name, str.c_str(), len);
gdf->col_name[len -1] = '\0';
allocateGdfDataSpace(gdf);
cols[col] = gdf;
h_dtypes[col] = raw_csv->dtypes[col];
h_data[col] = gdf->data;
h_valid[col] = gdf->valid;
}
CUDA_TRY( cudaMemcpy(d_dtypes,h_dtypes, sizeof(gdf_dtype) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
CUDA_TRY( cudaMemcpy(d_data,h_data, sizeof(void*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
CUDA_TRY( cudaMemcpy(d_valid,h_valid, sizeof(gdf_valid_type*) * (raw_csv->num_active_cols), cudaMemcpyHostToDevice));
free(h_dtypes);
free(h_valid);
free(h_data);
launch_dataConvertColumns(raw_csv,d_data, d_valid, d_dtypes,d_str_cols, args->skiprows, d_valid_count);
cudaDeviceSynchronize();
stringColCount=0;
for (int col = 0; col < raw_csv->num_active_cols; col++) {
gdf_column *gdf = cols[col];
if (gdf->dtype != gdf_dtype::GDF_STRING)
continue;
NVStrings* const stringCol = NVStrings::create_from_index(h_str_cols[stringColCount],size_t(raw_csv->num_records));
if ((raw_csv->quotechar != '\0') && (raw_csv->doublequote==true)) {
// In PANDAS, doublequote is enabled by default: two consecutive quotechar
// characters within a quoted field are reduced to a single quotechar
std::string quotechar(1, raw_csv->quotechar); // build from the single quote character; &quotechar is not a null-terminated string
std::string doublequotechar = quotechar + raw_csv->quotechar;
gdf->data = stringCol->replace(doublequotechar.c_str(), quotechar.c_str());
NVStrings::destroy(stringCol);
}
else {
gdf->data = stringCol;
}
RMM_TRY( RMM_FREE( h_str_cols [stringColCount], 0 ) );
stringColCount++;
}
CUDA_TRY( cudaMemcpy(h_valid_count,d_valid_count, sizeof(unsigned long long) * (raw_csv->num_active_cols), cudaMemcpyDeviceToHost));
//--- set the null count
for ( int col = 0; col < raw_csv->num_active_cols; col++) {
cols[col]->null_count = raw_csv->num_records - h_valid_count[col];
}
free(h_valid_count);
// free up space that is no longer needed
if (h_str_cols != NULL)
free ( h_str_cols);
free(raw_csv->h_parseCol);
if (d_str_cols != NULL)
RMM_TRY( RMM_FREE( d_str_cols, 0 ) );
RMM_TRY( RMM_FREE( d_valid, 0 ) );
RMM_TRY( RMM_FREE( d_valid_count, 0 ) );
RMM_TRY( RMM_FREE( d_dtypes, 0 ) );
RMM_TRY( RMM_FREE( d_data, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->recStart, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->d_parseCol, 0 ) );
RMM_TRY( RMM_FREE( raw_csv->d_num_records, 0 ) );
CUDA_TRY( cudaFree ( raw_csv->data) );
args->data = cols;
args->num_cols_out = raw_csv->num_active_cols;
args->num_rows_out = raw_csv->num_records;
delete raw_csv;
return error;
}
/*
* Converts the data type given as a string into the corresponding gdf_dtype enum value
*/
gdf_dtype convertStringToDtype(std::string &dtype) {
if (dtype.compare( "str") == 0) return GDF_STRING;
if (dtype.compare( "date") == 0) return GDF_DATE64;
if (dtype.compare( "date32") == 0) return GDF_DATE32;
if (dtype.compare( "date64") == 0) return GDF_DATE64;
if (dtype.compare( "timestamp") == 0) return GDF_TIMESTAMP;
if (dtype.compare( "category") == 0) return GDF_CATEGORY;
if (dtype.compare( "float") == 0) return GDF_FLOAT32;
if (dtype.compare( "float32") == 0) return GDF_FLOAT32;
if (dtype.compare( "float64") == 0) return GDF_FLOAT64;
if (dtype.compare( "double") == 0) return GDF_FLOAT64;
if (dtype.compare( "short") == 0) return GDF_INT16;
if (dtype.compare( "int") == 0) return GDF_INT32;
if (dtype.compare( "int32") == 0) return GDF_INT32;
if (dtype.compare( "int64") == 0) return GDF_INT64;
if (dtype.compare( "long") == 0) return GDF_INT64;
return GDF_invalid;
}
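// e.g. convertStringToDtype("float64") returns GDF_FLOAT64 and "short" returns GDF_INT16;
// any unrecognized name falls through to GDF_invalid.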
/*
* Create the raw_csv_t structure and allocate space on the GPU
*/
gdf_error updateRawCsv( const char * data, long num_bytes, raw_csv_t * raw ) {
int num_bits = (num_bytes + 63) / 64;
CUDA_TRY( cudaMallocManaged ((void**)&raw->data, (sizeof(char) * num_bytes)));
// RMM_TRY( RMM_ALLOC((void**)&raw->data, (sizeof(char) * num_bytes),0 ));
RMM_TRY( RMM_ALLOC((void**)&raw->d_num_records, sizeof(unsigned long long),0) );
CUDA_TRY( cudaMemcpy(raw->data, data, num_bytes, cudaMemcpyHostToDevice));
CUDA_TRY( cudaMemset(raw->d_num_records,0, ((sizeof(long)) )) );
raw->num_bits = num_bits;
return GDF_SUCCESS;
}
/*
* For each of the gdf_columns, create the on-device space. The on-host fields should already be filled in
*/
gdf_error allocateGdfDataSpace(gdf_column *gdf) {
long N = gdf->size;
long num_bitmaps = (N + 31) / 8; // bytes needed for one valid bit per record, rounded up with padding
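// e.g. N = 100 records -> (100 + 31) / 8 = 16 bytes of validity bits (13 needed, 3 bytes of padding)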
//--- allocate space for the valid bitmaps
RMM_TRY( RMM_ALLOC((void**)&gdf->valid, (sizeof(gdf_valid_type) * num_bitmaps), 0) );
CUDA_TRY(cudaMemset(gdf->valid, 0, (sizeof(gdf_valid_type) * num_bitmaps)) );
int elementSize=0;
//--- Allocate space for the data
switch(gdf->dtype) {
case gdf_dtype::GDF_INT8:
elementSize = sizeof(int8_t);
break;
case gdf_dtype::GDF_INT16:
elementSize = sizeof(int16_t);
break;
case gdf_dtype::GDF_INT32:
elementSize = sizeof(int32_t);
break;
case gdf_dtype::GDF_INT64:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_FLOAT32:
elementSize = sizeof(float);
break;
case gdf_dtype::GDF_FLOAT64:
elementSize = sizeof(double);
break;
case gdf_dtype::GDF_DATE32:
elementSize = sizeof(gdf_date32);
break;
case gdf_dtype::GDF_DATE64:
elementSize = sizeof(gdf_date64);
break;
case gdf_dtype::GDF_TIMESTAMP:
elementSize = sizeof(int64_t);
break;
case gdf_dtype::GDF_CATEGORY:
elementSize = sizeof(gdf_category);
break;
case gdf_dtype::GDF_STRING:
return gdf_error::GDF_SUCCESS;
// Memory for gdf->data allocated by string class eventually
default:
return GDF_UNSUPPORTED_DTYPE;
}
RMM_TRY( RMM_ALLOC((void**)&gdf->data, elementSize * N, 0) );
return gdf_error::GDF_SUCCESS;
}
//----------------------------------------------------------------------------------------------------------------
// CUDA Kernels
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_countRecords(raw_csv_t * csvData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, countRecords) );
// Calculate actual block count to use based on bitmap count
// Each bitmap is for a 64-byte chunk, and each data index is bitmap ID * 64
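// e.g. num_bytes = 1000 gives num_bits = (1000 + 63) / 64 = 16 chunks, so thread 3 scans bytes [192, 255]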
int gridSize = (csvData->num_bits + blockSize - 1) / blockSize;
countRecords <<< gridSize, blockSize >>> (
csvData->data, csvData->terminator, csvData->quotechar,
csvData->num_bytes, csvData->num_bits, csvData->d_num_records
);
CUDA_TRY(cudaGetLastError());
long recs=-1;
CUDA_TRY(cudaMemcpy(&recs, csvData->d_num_records, sizeof(long), cudaMemcpyDeviceToHost));
csvData->num_records=recs;
CUDA_TRY(cudaGetLastError());
return GDF_SUCCESS;
}
__global__ void countRecords(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records) {
// thread IDs range per block, so also need the block id
long tid = threadIdx.x + (blockDim.x * blockIdx.x);
if (tid >= num_bits)
return;
// data ID is a multiple of 64
long did = tid * 64L;
char *raw = (data + did);
long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
// process the data
long tokenCount = 0;
for (long x = 0; x < byteToProcess; x++) {
// Scan and log records. If quotations are enabled, then also log quote positions
// so they can be ignored in a post-processing step, since each chunk has limited visibility.
if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) {
tokenCount++;
} else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') {
x++;
tokenCount++;
}
}
atomicAdd((unsigned long long int*)num_records,(unsigned long long int)tokenCount);
}
gdf_error launch_storeRecordStart(raw_csv_t * csvData) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, storeRecordStart) );
// Calculate actual block count to use based on bitmap count
// Each bitmap is for a 64-byte chunk, and each data index is bitmap ID * 64
int gridSize = (csvData->num_bits + blockSize - 1) / blockSize;
storeRecordStart <<< gridSize, blockSize >>> (
csvData->data, csvData->terminator, csvData->quotechar,
csvData->num_bytes, csvData->num_bits, csvData->d_num_records,
csvData->recStart
);
CUDA_TRY( cudaGetLastError() );
return GDF_SUCCESS;
}
__global__ void storeRecordStart(char *data, const char terminator, const char quotechar, long num_bytes, long num_bits, unsigned long long* num_records,unsigned long long* recStart) {
// thread IDs range per block, so also need the block id
long tid = threadIdx.x + (blockDim.x * blockIdx.x);
if ( tid >= num_bits)
return;
// data ID - multiple of 64
long did = tid * 64L;
char *raw = (data + did);
long byteToProcess = ((did + 64L) < num_bytes) ? 64L : (num_bytes - did);
if(tid==0){
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+0;
}
// process the data
for (long x = 0; x < byteToProcess; x++) {
// Scan and log records. If quotations are enabled, then also log quote positions
// so they can be ignored in a post-processing step, since each chunk has limited visibility.
if ((raw[x] == terminator) || (quotechar != '\0' && raw[x] == quotechar)) {
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+x+1;
} else if (raw[x] == '\r' && (x+1L)<num_bytes && raw[x +1] == '\n') {
x++;
long pos = atomicAdd((unsigned long long int*)num_records,(unsigned long long int)1);
recStart[pos]=did+x+1;
}
}
}
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_dataConvertColumns(raw_csv_t *raw_csv, void **gdf, gdf_valid_type** valid, gdf_dtype* d_dtypes,string_pair **str_cols, long row_offset, unsigned long long *num_valid) {
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, convertCsvToGdf) );
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
parsing_opts_t opts;
opts.delimiter = raw_csv->delimiter;
opts.terminator = raw_csv->terminator;
opts.quotechar = raw_csv->quotechar;
opts.keepquotes = raw_csv->keepquotes;
convertCsvToGdf <<< gridSize, blockSize >>>(
raw_csv->data,
opts,
raw_csv->num_records,
raw_csv->num_actual_cols,
raw_csv->d_parseCol,
raw_csv->recStart,
d_dtypes,
gdf,
valid,
str_cols,
row_offset,
raw_csv->header_row,
raw_csv->dayfirst,
num_valid
);
CUDA_TRY( cudaGetLastError() );
return GDF_SUCCESS;
}
/*
* Data is processed one row/record at a time - so the total number of threads (tid) is equal to the number of rows.
*
*/
__global__ void convertCsvToGdf(
char *raw_csv,
const parsing_opts_t opts,
unsigned long long num_records,
int num_columns,
bool *parseCol,
unsigned long long *recStart,
gdf_dtype *dtype,
void **gdf_data,
gdf_valid_type **valid,
string_pair **str_cols,
unsigned long long row_offset,
long header_row,
bool dayfirst,
unsigned long long *num_valid
)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long extraOff=0;
if(rec_id>=header_row && header_row>=0)
extraOff=1;
long start = recStart[rec_id + row_offset + extraOff];
long stop = recStart[rec_id + 1 + row_offset + extraOff];
long pos = start;
int col = 0;
int actual_col = 0;
int stringCol = 0;
bool quotation = false;
while(col<num_columns){
if(start>stop)
break;
while(true){
// Use simple logic to ignore control chars between any quote pair.
// Handles nominal cases, including doublequotes within quotes, but
// may not report failures for malformed fields exactly as PANDAS does
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
if(parseCol[col]==true){
long tempPos=pos-1;
if(dtype[col] != gdf_dtype::GDF_CATEGORY && dtype[col] != gdf_dtype::GDF_STRING){
removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
}
if(start<=(tempPos)) { // Empty strings are not legal values
switch(dtype[col]) {
case gdf_dtype::GDF_INT8:
{
int8_t *gdf_out = (int8_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int8_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT16: {
int16_t *gdf_out = (int16_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int16_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT32:
{
int32_t *gdf_out = (int32_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int32_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_INT64:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT32:
{
float *gdf_out = (float *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<float>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_FLOAT64:
{
double *gdf_out = (double *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoFloat<double>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_DATE32:
{
gdf_date32 *gdf_out = (gdf_date32 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_DATE64:
{
gdf_date64 *gdf_out = (gdf_date64 *)gdf_data[actual_col];
gdf_out[rec_id] = parseDateTimeFormat(raw_csv, start, tempPos, dayfirst);
}
break;
case gdf_dtype::GDF_TIMESTAMP:
{
int64_t *gdf_out = (int64_t *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
}
break;
case gdf_dtype::GDF_CATEGORY:
{
gdf_category *gdf_out = (gdf_category *)gdf_data[actual_col];
gdf_out[rec_id] = convertStrtoHash(raw_csv, start, pos, HASH_SEED);
}
break;
case gdf_dtype::GDF_STRING:
{
long end = pos;
if(opts.keepquotes==false){
if((raw_csv[start] == opts.quotechar) && (raw_csv[end-1] == opts.quotechar)){
start++;
end--;
}
}
str_cols[stringCol][rec_id].first = raw_csv+start;
str_cols[stringCol][rec_id].second = size_t(end-start);
stringCol++;
}
break;
default:
break;
}
// set the valid bitmap - all bits were set to 0 to start
int bitmapIdx = whichBitmap(rec_id); // which bitmap
int bitIdx = whichBit(rec_id); // which bit - over an 8-bit index
setBit(valid[col]+bitmapIdx, bitIdx); // This is done with atomics
atomicAdd((unsigned long long int*)&num_valid[col],(unsigned long long int)1);
}
else if(dtype[col]==gdf_dtype::GDF_STRING){
str_cols[stringCol][rec_id].first = NULL;
str_cols[stringCol][rec_id].second = 0;
stringCol++;
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
//----------------------------------------------------------------------------------------------------------------
gdf_error launch_dataTypeDetection(
raw_csv_t * raw_csv,
long row_offset,
column_data_t* d_columnData)
{
int blockSize; // suggested thread count to use
int minGridSize; // minimum block count required
CUDA_TRY( cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, dataTypeDetection) );
// Calculate actual block count to use based on records count
int gridSize = (raw_csv->num_records + blockSize - 1) / blockSize;
parsing_opts_t opts;
opts.delimiter = raw_csv->delimiter;
opts.terminator = raw_csv->terminator;
opts.quotechar = raw_csv->quotechar;
opts.keepquotes = raw_csv->keepquotes;
dataTypeDetection <<< gridSize, blockSize >>>(
raw_csv->data,
opts,
raw_csv->num_records,
raw_csv->num_actual_cols,
raw_csv->d_parseCol,
raw_csv->recStart,
row_offset,
raw_csv->header_row,
d_columnData
);
CUDA_TRY( cudaGetLastError() );
return GDF_SUCCESS;
}
/*
 * Data type detection kernel - processes one row/record per thread and, for each active
 * column, counts how many fields look like ints (8/16/32/64 bit), floats, dates, strings
 * or NULLs. The host side then picks a dtype for each column from these counts.
 */
__global__ void dataTypeDetection(
char *raw_csv,
const parsing_opts_t opts,
unsigned long long num_records,
int num_columns,
bool *parseCol,
unsigned long long *recStart,
unsigned long long row_offset,
long header_row,
column_data_t* d_columnData
)
{
// thread IDs range per block, so also need the block id
long rec_id = threadIdx.x + (blockDim.x * blockIdx.x); // this is the entry into the field array - tid is an element within the num_entries array
// we can have more threads than data, make sure we are not past the end of the data
if ( rec_id >= num_records)
return;
long extraOff=0;
if(rec_id>=header_row && header_row>=0)
extraOff=1;
long start = recStart[rec_id + row_offset + extraOff];
long stop = recStart[rec_id + 1 + row_offset + extraOff];
long pos = start;
int col = 0;
int actual_col = 0;
bool quotation = false;
// Going through all the columns of a given record
while(col<num_columns){
if(start>stop)
break;
// Finding the breaking point for each column
while(true){
// Use simple logic to ignore control chars between any quote pair.
// Handles nominal cases, including doublequotes within quotes, but
// may not report failures for malformed fields exactly as PANDAS does
if(raw_csv[pos] == opts.quotechar){
quotation = !quotation;
}
else if(quotation==false){
if(raw_csv[pos] == opts.delimiter){
break;
}
else if(raw_csv[pos] == opts.terminator){
break;
}
else if(raw_csv[pos] == '\r' && ((pos+1) < stop && raw_csv[pos+1] == '\n')){
stop--;
break;
}
}
if(pos>=stop)
break;
pos++;
}
// Checking if this is a column that the user wants --- user can filter columns
if(parseCol[col]==true){
long tempPos=pos-1;
// Checking if the field is NULL (empty)
if(start>(tempPos)){
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
pos++;
start=pos;
col++;
actual_col++;
continue;
}
long countNumber=0;
long countDecimal=0;
long countSlash=0;
long countDash=0;
long countColon=0;
long countString=0;
long strLen=pos-start;
// Remove all pre and post white-spaces. We might find additional NULL fields if the entire entry is made up of only spaces.
removePrePostWhiteSpaces2(raw_csv, &start, &tempPos);
for(long startPos=start; startPos<=tempPos; startPos++){
if(raw_csv[startPos]>= '0' && raw_csv[startPos] <= '9'){
countNumber++;
continue;
}
// Looking for unique characters that will help identify column types.
switch (raw_csv[startPos]){
case '.':
countDecimal++;break;
case '-':
countDash++; break;
case '/':
countSlash++;break;
case ':':
countColon++;break;
default:
countString++;
break;
}
}
if(strLen==0) // Spaces were removed in the pre-processing, and thus we can end up with an empty string.
atomicAdd(& d_columnData[actual_col].countNULL, 1L);
// For an integer, the digit count has to equal the string length, or be one less if the field starts with a minus sign
else if(countNumber==(strLen) || ( strLen>1 && countNumber==(strLen-1) && raw_csv[start]=='-') ){
// Checking to see if the integer value requires 8, 16, 32, or 64 bits.
// This will allow us to allocate the exact amount of memory.
int64_t i = convertStrtoInt<int64_t>(raw_csv, start, tempPos);
if(i >= (1L<<31)){
atomicAdd(& d_columnData[actual_col].countInt64, 1L);
}
else if(i >= (1L<<15)){
atomicAdd(& d_columnData[actual_col].countInt32, 1L);
}
else if(i >= (1L<<7)){
atomicAdd(& d_columnData[actual_col].countInt16, 1L);
}
else
atomicAdd(& d_columnData[actual_col].countInt8, 1L);
}
// Floating point numbers are made up of digits, must contain a decimal point, and can have a leading minus sign.
else if((countNumber==(strLen-1) && countDecimal==1) || (strLen>2 && countNumber==(strLen-2) && raw_csv[start]=='-')){
atomicAdd(& d_columnData[actual_col].countFloat, 1L);
}
// A date-time field cannot have more than 3 non-numeric characters. As such, if an entry has more than 3 string characters, it is not
// a date-time field. Also, if a field has multiple decimal points, then it is not a legitimate number.
else if(countString > 3 || countDecimal > 1){
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
else {
// A date field can have either one or two '-' or '/'. A legal combination will only have one of the two separators.
// To simplify the process of auto column detection, we are not covering all the date-time format permutations.
if((countDash>0 && countDash<=2 && countSlash==0)|| (countDash==0 && countSlash>0 && countSlash<=2) ){
if((countColon<=2)){
atomicAdd(& d_columnData[actual_col].countDateAndTime, 1L);
}
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
// Default field is string type.
else{
atomicAdd(& d_columnData[actual_col].countString, 1L);
}
}
actual_col++;
}
pos++;
start=pos;
col++;
}
}
//----------------------------------------------------------------------------------------------------------------
/*
* Return which bit is set
* x is the occurrence: 1 = first, 2 = second, ...
*/
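// e.g. with r_bits[tid] = 0b10010 and x = 2, the second set bit is bit 4, so 4 is returned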
__device__ int findSetBit(int tid, long num_bits, uint64_t *r_bits, int x) {
int idx = tid;
if ( x == 0 )
return -1;
int withinBitCount = 0;
int offset = 0;
int found = 0;
uint64_t bitmap = r_bits[idx];
while (found != x)
{
if(bitmap == 0)
{
idx++;
if (idx >= num_bits)
return -1;
bitmap = r_bits[idx];
offset += 64;
withinBitCount = 0;
}
if ( bitmap & 1 ) {
found++; //found a set bit
}
bitmap >>= 1;
++withinBitCount;
}
offset += withinBitCount -1;
return offset;
}
|
37c47a0d545ff16ed7a676c7ac562144526faa24.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
// *****************************************************************************
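// SetupDiff builds, at each quadrature point, the six unique entries of the symmetric
// matrix w/det(J) * adj(J) * adj(J)^T (used as the geometric factors of the diffusion
// operator), plus a manufactured solution u = prod_i sin(pi*(c_i + k_i*x_i)) and the
// matching right-hand side rho * pi^2 * (k0^2 + k1^2 + k2^2) * u, where rho = w * det(J).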
extern "C" __global__ void SetupDiff(void *ctx, CeedInt Q,
Fields_Cuda fields) {
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
const CeedScalar *x = (const CeedScalar *)fields.inputs[0];
const CeedScalar *J = (const CeedScalar *)fields.inputs[1];
const CeedScalar *w = (const CeedScalar *)fields.inputs[2];
CeedScalar *qd = fields.outputs[0], *true_soln = fields.outputs[1], *rhs = fields.outputs[2];
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < Q;
i += blockDim.x * gridDim.x) {
const CeedScalar J11 = J[i+Q*0];
const CeedScalar J21 = J[i+Q*1];
const CeedScalar J31 = J[i+Q*2];
const CeedScalar J12 = J[i+Q*3];
const CeedScalar J22 = J[i+Q*4];
const CeedScalar J32 = J[i+Q*5];
const CeedScalar J13 = J[i+Q*6];
const CeedScalar J23 = J[i+Q*7];
const CeedScalar J33 = J[i+Q*8];
const CeedScalar A11 = J22*J33 - J23*J32;
const CeedScalar A12 = J13*J32 - J12*J33;
const CeedScalar A13 = J12*J23 - J13*J22;
const CeedScalar A21 = J23*J31 - J21*J33;
const CeedScalar A22 = J11*J33 - J13*J31;
const CeedScalar A23 = J13*J21 - J11*J23;
const CeedScalar A31 = J21*J32 - J22*J31;
const CeedScalar A32 = J12*J31 - J11*J32;
const CeedScalar A33 = J11*J22 - J12*J21;
const CeedScalar qw = w[i] / (J11*A11 + J21*A12 + J31*A13);
qd[i+Q*0] = qw * (A11*A11 + A12*A12 + A13*A13);
qd[i+Q*1] = qw * (A11*A21 + A12*A22 + A13*A23);
qd[i+Q*2] = qw * (A11*A31 + A12*A32 + A13*A33);
qd[i+Q*3] = qw * (A21*A21 + A22*A22 + A23*A23);
qd[i+Q*4] = qw * (A21*A31 + A22*A32 + A23*A33);
qd[i+Q*5] = qw * (A31*A31 + A32*A32 + A33*A33);
const CeedScalar c[3] = { 0, 1., 2. };
const CeedScalar k[3] = { 1., 2., 3. };
true_soln[i] = sin(M_PI*(c[0] + k[0]*x[i+Q*0])) *
sin(M_PI*(c[1] + k[1]*x[i+Q*1])) *
sin(M_PI*(c[2] + k[2]*x[i+Q*2]));
const CeedScalar rho = w[i] * (J11*A11 + J21*A12 + J31*A13);
rhs[i] = rho * M_PI*M_PI * (k[0]*k[0] + k[1]*k[1] + k[2]*k[2]) * true_soln[i];
}
}
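// Diff applies the symmetric 3x3 matrix packed in qd (entries 0..5) to the gradient ug
// at each quadrature point, producing vg.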
extern "C" __global__ void Diff(void *ctx, CeedInt Q,
Fields_Cuda fields) {
const CeedScalar *ug = (const CeedScalar *)fields.inputs[0];
const CeedScalar *qd = (const CeedScalar *)fields.inputs[1];
CeedScalar *vg = fields.outputs[0];
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < Q;
i += blockDim.x * gridDim.x) {
const CeedScalar ug0 = ug[i+Q*0];
const CeedScalar ug1 = ug[i+Q*1];
const CeedScalar ug2 = ug[i+Q*2];
vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1 + qd[i+Q*2]*ug2;
vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*3]*ug1 + qd[i+Q*4]*ug2;
vg[i+Q*2] = qd[i+Q*2]*ug0 + qd[i+Q*4]*ug1 + qd[i+Q*5]*ug2;
}
}
| 37c47a0d545ff16ed7a676c7ac562144526faa24.cu | // Copyright (c) 2017-2018, Lawrence Livermore National Security, LLC.
// Produced at the Lawrence Livermore National Laboratory. LLNL-CODE-734707.
// All Rights reserved. See files LICENSE and NOTICE for details.
//
// This file is part of CEED, a collection of benchmarks, miniapps, software
// libraries and APIs for efficient high-order finite element and spectral
// element discretizations for exascale applications. For more information and
// source code availability see http://github.com/ceed.
//
// The CEED research is supported by the Exascale Computing Project 17-SC-20-SC,
// a collaborative effort of two U.S. Department of Energy organizations (Office
// of Science and the National Nuclear Security Administration) responsible for
// the planning and preparation of a capable exascale ecosystem, including
// software, applications, hardware, advanced system engineering and early
// testbed platforms, in support of the nation's exascale computing imperative.
// *****************************************************************************
extern "C" __global__ void SetupDiff(void *ctx, CeedInt Q,
Fields_Cuda fields) {
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
const CeedScalar *x = (const CeedScalar *)fields.inputs[0];
const CeedScalar *J = (const CeedScalar *)fields.inputs[1];
const CeedScalar *w = (const CeedScalar *)fields.inputs[2];
CeedScalar *qd = fields.outputs[0], *true_soln = fields.outputs[1], *rhs = fields.outputs[2];
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < Q;
i += blockDim.x * gridDim.x) {
const CeedScalar J11 = J[i+Q*0];
const CeedScalar J21 = J[i+Q*1];
const CeedScalar J31 = J[i+Q*2];
const CeedScalar J12 = J[i+Q*3];
const CeedScalar J22 = J[i+Q*4];
const CeedScalar J32 = J[i+Q*5];
const CeedScalar J13 = J[i+Q*6];
const CeedScalar J23 = J[i+Q*7];
const CeedScalar J33 = J[i+Q*8];
const CeedScalar A11 = J22*J33 - J23*J32;
const CeedScalar A12 = J13*J32 - J12*J33;
const CeedScalar A13 = J12*J23 - J13*J22;
const CeedScalar A21 = J23*J31 - J21*J33;
const CeedScalar A22 = J11*J33 - J13*J31;
const CeedScalar A23 = J13*J21 - J11*J23;
const CeedScalar A31 = J21*J32 - J22*J31;
const CeedScalar A32 = J12*J31 - J11*J32;
const CeedScalar A33 = J11*J22 - J12*J21;
const CeedScalar qw = w[i] / (J11*A11 + J21*A12 + J31*A13);
qd[i+Q*0] = qw * (A11*A11 + A12*A12 + A13*A13);
qd[i+Q*1] = qw * (A11*A21 + A12*A22 + A13*A23);
qd[i+Q*2] = qw * (A11*A31 + A12*A32 + A13*A33);
qd[i+Q*3] = qw * (A21*A21 + A22*A22 + A23*A23);
qd[i+Q*4] = qw * (A21*A31 + A22*A32 + A23*A33);
qd[i+Q*5] = qw * (A31*A31 + A32*A32 + A33*A33);
const CeedScalar c[3] = { 0, 1., 2. };
const CeedScalar k[3] = { 1., 2., 3. };
true_soln[i] = sin(M_PI*(c[0] + k[0]*x[i+Q*0])) *
sin(M_PI*(c[1] + k[1]*x[i+Q*1])) *
sin(M_PI*(c[2] + k[2]*x[i+Q*2]));
const CeedScalar rho = w[i] * (J11*A11 + J21*A12 + J31*A13);
rhs[i] = rho * M_PI*M_PI * (k[0]*k[0] + k[1]*k[1] + k[2]*k[2]) * true_soln[i];
}
}
extern "C" __global__ void Diff(void *ctx, CeedInt Q,
Fields_Cuda fields) {
const CeedScalar *ug = (const CeedScalar *)fields.inputs[0];
const CeedScalar *qd = (const CeedScalar *)fields.inputs[1];
CeedScalar *vg = fields.outputs[0];
for (int i = blockIdx.x * blockDim.x + threadIdx.x;
i < Q;
i += blockDim.x * gridDim.x) {
const CeedScalar ug0 = ug[i+Q*0];
const CeedScalar ug1 = ug[i+Q*1];
const CeedScalar ug2 = ug[i+Q*2];
vg[i+Q*0] = qd[i+Q*0]*ug0 + qd[i+Q*1]*ug1 + qd[i+Q*2]*ug2;
vg[i+Q*1] = qd[i+Q*1]*ug0 + qd[i+Q*3]*ug1 + qd[i+Q*4]*ug2;
vg[i+Q*2] = qd[i+Q*2]*ug0 + qd[i+Q*4]*ug1 + qd[i+Q*5]*ug2;
}
}
|
83976c65f78d83b176380cd039fd741a0d68ebcd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "kernel_t.h"
__global__ void global_function (int i, int *j) {
*j = 666;
}
const char* get_kernel() {
return ((const char*) global_function);
}
| 83976c65f78d83b176380cd039fd741a0d68ebcd.cu | #include "kernel_t.h"
__global__ void global_function (int i, int *j) {
*j = 666;
}
const char* get_kernel() {
return ((const char*) global_function);
}
|
1aa54c63eb1a994e23e55424c84399722aa93da5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/******************************************************************************
Hello! Ali here!!
usage information and TODO stuff here...
Ali here again!!!
******************************************************************************/
#include <stdio.h>
#include "gpu.h"
/******************************************************************************/
APopulation initializePop(unsigned int width, unsigned int height){
APopulation P;
P.nThreads.x = 32; // 32 x 32 = 1024 threads per block
P.nThreads.y = 32;
P.nThreads.z = 1;
P.nBlocks.x = (int) ceil(width/32.0); // however many blocks needed for image
P.nBlocks.y = (int) ceil(height/32.0);
P.nBlocks.z = 1;
P.pop_width = P.nBlocks.x * P.nThreads.x; // save this info
P.pop_height = P.nBlocks.y * P.nThreads.y;
P.N = P.pop_width * P.pop_height; // not the same as width and height
hipError_t err;
err = hipMalloc( (void**) &P.rand, P.N*sizeof(hiprandState_t));
if(err != hipSuccess){
printf("cuda error allocating rand = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc( (void**) &P.red, P.N*sizeof(float));
if(err != hipSuccess){
printf("cuda error allocating red = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc( (void**) &P.green, P.N*sizeof(float));
if(err != hipSuccess){
printf("cuda error allocating green = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMalloc( (void**) &P.blue, P.N*sizeof(float));
if(err != hipSuccess){
printf("cuda error allocating red = %s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipLaunchKernelGGL(( setup_rands) , dim3(P.nBlocks), dim3(P.nThreads) , 0, 0, P.rand, time(NULL), P.N);
//----- placeholder for initializing memory with values
// int a[P.N], b[P.N];
// for (int i=0; i<P.N; i++){
// a[i] = -i;
// b[i] = i;
// }
// hipMemcpy(P.dev_a, a, P.N*sizeof(int), cH2D);
// hipMemcpy(P.dev_b, b, P.N*sizeof(int), cH2D);
// ------------------------
return P;
}
/******************************************************************************/
int runIter(APopulation *P, unsigned long tick){
printf("tick = %lu\n", tick);
hipLaunchKernelGGL(( randomize) , dim3(P->nBlocks), dim3(P->nThreads) , 0, 0, P->red, P->rand, P->N);
hipLaunchKernelGGL(( randomize) , dim3(P->nBlocks), dim3(P->nThreads) , 0, 0, P->green, P->rand, P->N);
hipLaunchKernelGGL(( randomize) , dim3(P->nBlocks), dim3(P->nThreads) , 0, 0, P->blue, P->rand, P->N);
hipLaunchKernelGGL(( kernel) , dim3(P->nBlocks), dim3(P->nThreads) , 0, 0, P->red, P->green, P->blue, P->N);
// add <<< P->nBlocks, P->nThreads >>> (P->dev_a, P->dev_b, P->dev_c);
// -- crud...
// int a[P->N], b[P->N], c[P->N];
// hipMemcpy(&a, P->dev_a, P->N * sizeof(int), cD2H);
// hipMemcpy(&b, P->dev_b, P->N * sizeof(int), cD2H);
// hipMemcpy(&c, P->dev_c, P->N * sizeof(int), cD2H);
//
// for(int i = 0; i< P->N; i++){
// printf("%d + %d = %d\n", a[i], b[i], c[i]);
// }
// ----
return 0;
}
/******************************************************************************/
// Mike Brady's Kernel
__global__ void
kernel(float* red, float* green, float* blue, unsigned long N){
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N){
red[tid] = .5;
blue[tid] = .7;
green[tid]= .2;
}
}
/******************************************************************************/
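// One hiprandState_t per thread/pixel: hiprand_init is called with the same seed but a
// distinct sequence id (tid), so every thread draws from an independent random stream.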
__global__ void
setup_rands(hiprandState_t* rand, unsigned long seed, unsigned long N)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N) hiprand_init(seed, tid, 0, &rand[tid]);
}
/******************************************************************************/
__global__ void
randomize(float* array, hiprandState_t* rand, unsigned long N)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N){
hiprandState_t localState = rand[tid]; // get local hiprandState_t as seed
float theRand = hiprand_uniform(&localState); // use to get value from 0-1
rand[tid] = localState; // save new state as previous state for next gen
array[tid] = theRand;
}
}
/******************************************************************************/
void freeGPU(APopulation *P)
{
hipFree(P->red);
hipFree(P->green);
hipFree(P->blue);
hipFree(P->rand);
// hipFree(P->dev_a);
// hipFree(P->dev_b);
// hipFree(P->dev_c);
}
/******************************************************************************/
| 1aa54c63eb1a994e23e55424c84399722aa93da5.cu | /******************************************************************************
Hello! Ali here!!
usage information and TODO stuff here...
Ali here again!!!
******************************************************************************/
#include <stdio.h>
#include "gpu.h"
/******************************************************************************/
APopulation initializePop(unsigned int width, unsigned int height){
APopulation P;
P.nThreads.x = 32; // 32 x 32 = 1024 threads per block
P.nThreads.y = 32;
P.nThreads.z = 1;
P.nBlocks.x = (int) ceil(width/32.0); // however many blocks needed for image
P.nBlocks.y = (int) ceil(height/32.0);
P.nBlocks.z = 1;
P.pop_width = P.nBlocks.x * P.nThreads.x; // save this info
P.pop_height = P.nBlocks.y * P.nThreads.y;
P.N = P.pop_width * P.pop_height; // not the same as width and height
cudaError_t err;
err = cudaMalloc( (void**) &P.rand, P.N*sizeof(curandState));
if(err != cudaSuccess){
printf("cuda error allocating rand = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc( (void**) &P.red, P.N*sizeof(float));
if(err != cudaSuccess){
printf("cuda error allocating red = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc( (void**) &P.green, P.N*sizeof(float));
if(err != cudaSuccess){
printf("cuda error allocating green = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMalloc( (void**) &P.blue, P.N*sizeof(float));
if(err != cudaSuccess){
printf("cuda error allocating red = %s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
setup_rands <<< P.nBlocks, P.nThreads >>> (P.rand, time(NULL), P.N);
//----- placeholder for initializing memory with values
// int a[P.N], b[P.N];
// for (int i=0; i<P.N; i++){
// a[i] = -i;
// b[i] = i;
// }
// cudaMemcpy(P.dev_a, a, P.N*sizeof(int), cH2D);
// cudaMemcpy(P.dev_b, b, P.N*sizeof(int), cH2D);
// ------------------------
return P;
}
/******************************************************************************/
int runIter(APopulation *P, unsigned long tick){
printf("tick = %lu\n", tick);
randomize <<< P->nBlocks, P->nThreads >>> (P->red, P->rand, P->N);
randomize <<< P->nBlocks, P->nThreads >>> (P->green, P->rand, P->N);
randomize <<< P->nBlocks, P->nThreads >>> (P->blue, P->rand, P->N);
kernel <<< P->nBlocks, P->nThreads >>> (P->red, P->green, P->blue, P->N);
// add <<< P->nBlocks, P->nThreads >>> (P->dev_a, P->dev_b, P->dev_c);
// -- crud...
// int a[P->N], b[P->N], c[P->N];
// cudaMemcpy(&a, P->dev_a, P->N * sizeof(int), cD2H);
// cudaMemcpy(&b, P->dev_b, P->N * sizeof(int), cD2H);
// cudaMemcpy(&c, P->dev_c, P->N * sizeof(int), cD2H);
//
// for(int i = 0; i< P->N; i++){
// printf("%d + %d = %d\n", a[i], b[i], c[i]);
// }
// ----
return 0;
}
/******************************************************************************/
// Mike Brady's Kernel
__global__ void
kernel(float* red, float* green, float* blue, unsigned long N){
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N){
red[tid] = .5;
blue[tid] = .7;
green[tid]= .2;
}
}
/******************************************************************************/
__global__ void
setup_rands(curandState* rand, unsigned long seed, unsigned long N)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N) curand_init(seed, tid, 0, &rand[tid]);
}
/******************************************************************************/
__global__ void
randomize(float* array, curandState* rand, unsigned long N)
{
int x = threadIdx.x + (blockIdx.x * blockDim.x);
int y = threadIdx.y + (blockIdx.y * blockDim.y);
unsigned long tid = x + (y * blockDim.x * gridDim.x);
if(tid < N){
curandState localState = rand[tid]; // get local curandState as seed
float theRand = curand_uniform(&localState); // use to get value from 0-1
rand[tid] = localState; // save new state as previous state for next gen
array[tid] = theRand;
}
}
/******************************************************************************/
void freeGPU(APopulation *P)
{
cudaFree(P->red);
cudaFree(P->green);
cudaFree(P->blue);
cudaFree(P->rand);
// cudaFree(P->dev_a);
// cudaFree(P->dev_b);
// cudaFree(P->dev_c);
}
/******************************************************************************/
|
4dca8e7a11ca60a4b418973a7b779ad8d5d111fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
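// segment_sum semantics: output[k] is the sum of all input elements (or rows) whose segment
// index equals k, e.g. data [1, 2, 3, 4] with sorted indices [0, 0, 1, 1] -> output [3, 7].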
template<typename T, typename I>
static __global__ void
segmentSumLinearKernel(
const void *input, const Nd4jLong *inputShape,
int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, const Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
const T *x;
__shared__
T *z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<const T *>(input);
z = reinterpret_cast<T *>(output);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
//val[segment] = ;
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
template<typename T, typename I>
static __global__ void
unsortedSegmentSumLinearKernel(
const void *input, const Nd4jLong *inputShape,
const void *indices, const Nd4jLong *indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, const Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
const T *x;
__shared__
T *z;
__shared__
const I *y; //int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
segment = blockIdx.x;
x = reinterpret_cast<const T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<const I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
template <typename T, typename I>
static __global__ void segmentSumTadKernel(
const void* inputBuf, const Nd4jLong* inputShape, const Nd4jLong* inputTads, const Nd4jLong* inputTadOffsets,
const I* indices,
int* starts, int* lengths, Nd4jLong numOfClasses,
void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* outputTads, const Nd4jLong* outputTadOffsets) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
if (threadIdx.x == 0) {
auto segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (blockIdx.x <= total) {
auto x = reinterpret_cast<const T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[indices[idx]])
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( segmentSumLinearKernel<T,I>), dim3(numClasses), dim3(input->lengthOf()), numClasses * 32 + 32, *stream, input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(input->sizeAt(0)), dim3(512), 2048, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
hipLaunchKernelGGL(( unsortedSegmentSumLinearKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
hipLaunchKernelGGL(( segmentSumTadKernel<T,I>), dim3(dims.x), dim3(dims.y), dims.z, *stream, input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
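// Backprop of segment_sum: d(out[k])/d(in[i]) = 1 whenever segment(i) == k, so each input
// position simply receives the output gradient of its segment.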
template <typename T, typename I>
static __global__ void segmentSumBPLinearKernel(
const void* inputBuf, const Nd4jLong* inputShape,
const void* eps, const Nd4jLong* epsShape,
const void* indicesBuf, const Nd4jLong* indicesShape,
void* outputBuf, const Nd4jLong* outputShape) {
auto x = reinterpret_cast<const T*>(inputBuf);
auto y = reinterpret_cast<const I*>(indicesBuf);
auto z = reinterpret_cast<T*>(outputBuf);
auto gradOut = reinterpret_cast<const T*>(eps);
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = gradOut[gradOffsetO];
}
}
// -------------------------------------------------------------------------------------------------------------- //
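// Each block processes one entry i of the indices array and copies the gradient TAD of
// segment indices[i] element-wise into the i-th output TAD.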
template <typename T, typename I>
static __global__ void segmentSumBPTadKernel(
const void* inputBuf, const Nd4jLong* inputShape,
const void* eps, const Nd4jLong* epsShape,
const void* indicesBuf, const Nd4jLong* indicesShape,
void* outputBuf, const Nd4jLong* outputShape,
const Nd4jLong* inputTad, const Nd4jLong* inputOffsets,
const Nd4jLong* gradOutTad, const Nd4jLong* gradOutOffsets,
const Nd4jLong* outTad, const Nd4jLong* outOffsets) {
__shared__ const T* x;
__shared__ const T* gradOut;
__shared__ const I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<const T*>(inputBuf);
y = reinterpret_cast<const I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<const T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[yIndex];
auto currentOut = z + outOffsets[i];
auto outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
int segmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
auto gradOutTads = packGradOut.specialShapeInfo();
auto gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int segmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
template <typename T, typename I>
static int unsortedSegmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
hipLaunchKernelGGL(( segmentSumBPLinearKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
auto gradOutTads = packGradOut.specialShapeInfo();
auto gradOutTadOffsets = packGradOut.specialOffsets();
hipLaunchKernelGGL(( segmentSumBPTadKernel<T,I>), dim3(gradOut->lengthOf()), dim3(input->lengthOf()), 256, *stream, input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} | 4dca8e7a11ca60a4b418973a7b779ad8d5d111fd.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author GS <[email protected]>
//
#include <ops/declarable/helpers/segment.h>
#include <ops/declarable/helpers/segment_common.h>
#include <array/NDArrayFactory.h>
#include <helpers/ShapeUtils.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
// -------------------------------------------------------------------------------------------------------------- //
// Segment ops linear kernels
// -------------------------------------------------------------------------------------------------------------- //
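// Sorted segment sum over a vector: blocks are spread across segments, thread 0 seeds the
// output with the segment's first element, and the remaining elements of the segment are
// accumulated with atomicAdd.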
template<typename T, typename I>
static __global__ void
segmentSumLinearKernel(
const void *input, const Nd4jLong *inputShape,
int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, const Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
const T *x;
__shared__
T *z;
__shared__ int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
threadsPerSegment = (gridDim.x + numOfClasses - 1) / numOfClasses;
segment = blockIdx.x / threadsPerSegment;
x = reinterpret_cast<const T *>(input);
z = reinterpret_cast<T *>(output);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
if (segment < numOfClasses) {
zIndex = shape::getIndexOffset(segment, outputShape);
start = starts[segment];
finish = start + lengths[segment];
//val[segment] = ;
z[zIndex] = x[shape::getIndexOffset(start, inputShape)];
}
}
__syncthreads();
for (auto e = start + threadIdx.x + 1; e < finish; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
// -------------------------------------------------------------------------------------------------------------- //
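// Unsorted segment sum over a vector: one block per segment; thread 0 seeds the output with
// the element at starts[segment] (or 0 for an empty segment), then all threads scan the
// indices and atomically add the remaining elements mapped to this segment.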
template<typename T, typename I>
static __global__ void
unsortedSegmentSumLinearKernel(
const void *input, const Nd4jLong *inputShape,
const void *indices, const Nd4jLong *indicesShape,
int *starts, int *lengths, Nd4jLong numOfClasses,
void *output, const Nd4jLong *outputShape) {
__shared__
T *val;
__shared__
Nd4jLong xLen, zLen, segment, zIndex;
__shared__
const T *x;
__shared__
T *z;
__shared__
const I *y; //int threadsPerSegment, start, finish;
if (threadIdx.x == 0) {
segment = blockIdx.x;
x = reinterpret_cast<const T *>(input);
z = reinterpret_cast<T *>(output);
y = reinterpret_cast<const I *>(indices);
xLen = shape::length(inputShape);
zLen = shape::length(outputShape);
zIndex = shape::getIndexOffset(segment, outputShape);
if (lengths[segment] > 0)
z[zIndex] = x[shape::getIndexOffset(starts[segment], inputShape)];
else
z[zIndex] = 0; //DataTypeUtils::max<T>();
}
__syncthreads();
if (lengths[segment] > 0)
for (auto e = threadIdx.x; e < xLen; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputShape);
auto yIndex = shape::getIndexOffset(e, indicesShape);
if (y[yIndex] == segment && e != starts[segment]) {
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
// SegmentSum kernel
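// One block per outer slice of the input: the slice is atomically accumulated into the
// output TAD of the segment given by indices[blockIdx.x].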
template <typename T, typename I>
static __global__ void segmentSumTadKernel(
const void* inputBuf, const Nd4jLong* inputShape, const Nd4jLong* inputTads, const Nd4jLong* inputTadOffsets,
const I* indices,
int* starts, int* lengths, Nd4jLong numOfClasses,
void* outputBuf, const Nd4jLong* outputShape, const Nd4jLong* outputTads, const Nd4jLong* outputTadOffsets) {
__shared__ T* val;
__shared__ Nd4jLong len, zIndex, total;
__shared__ T* z;
__shared__ int start, finish;
if (threadIdx.x == 0) {
auto segment = indices[blockIdx.x]; // / threadsPerSegment;
z = reinterpret_cast<T*>(outputBuf) + outputTadOffsets[segment];
len = shape::length(inputTads);
start = starts[segment];
finish = start + lengths[segment];
total = shape::sizeAt(inputShape, 0);
}
__syncthreads();
auto idx = blockIdx.x;
if (blockIdx.x <= total) {
auto x = reinterpret_cast<const T *>(inputBuf) + inputTadOffsets[idx];
if (blockIdx.x == start) {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
else {
for (auto e = threadIdx.x; e < len; e += blockDim.x) {
auto xIndex = shape::getIndexOffset(e, inputTads);
auto zIndex = shape::getIndexOffset(e, outputTads);
if (lengths[indices[idx]])
sd::math::atomics::nd4j_atomicAdd(&z[zIndex], x[xIndex]);
}
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
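// Sorted segment sum host dispatcher: numClasses is the last (largest) index plus one;
// per-class start offsets and lengths are filled on the device, then the linear kernel
// (vector input) or the TAD kernel (higher-rank input) performs the accumulation.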
template <typename T, typename I>
static void segmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, NDArray* output) {
auto stream = context->getCudaStream();
Nd4jLong numClasses = indices->e<Nd4jLong>(indices->lengthOf() - 1) + 1;
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numClasses}, context);
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numClasses}, context);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numClasses, indices->lengthOf(), numClasses * 32 + 32);
fillUpSegments(indices, numClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
segmentSumLinearKernel<T,I><<<numClasses, input->lengthOf(), numClasses * 32 + 32, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
segmentSumTadKernel<T,I><<<input->sizeAt(0), 512, 2048, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void segmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), segmentSumFunctor_, (context, input, indices, output), NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
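// Unsorted segment sum host dispatcher: fillUpSegments computes per-class start offsets and
// lengths on the device, then either the linear kernel (vector input) or the TAD kernel
// (higher-rank input) accumulates the per-segment sums.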
template <typename T, typename I>
static void unsortedSegmentSumFunctor_(sd::LaunchContext* context, NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
// NDArray classes = NDArrayFactory::create<int>('c', {numOfClasses, 2});
NDArray classesRangesBegs = NDArrayFactory::create<int>('c', {numOfClasses}, context);
NDArray classesRangesLens = NDArrayFactory::create<int>('c', {numOfClasses}, context);
// NDArray row = NDArrayFactory::create<int>('c', {1, 2}, {(int)indices->lengthOf(), (int)0});
// classes.applyTrueBroadcast(sd::BroadcastOpsTuple::Assign(), &row, &classes);
classesRangesBegs.assign(indices->lengthOf());
classesRangesLens.assign(0);
dim3 dims(numOfClasses, indices->lengthOf(), (numOfClasses + 1) * 64);
// int* classesBuf = reinterpret_cast<int*>(classes.specialBuffer());
fillUpSegments(indices, numOfClasses, classesRangesBegs, classesRangesLens);
int* begins = reinterpret_cast<int*>(classesRangesBegs.specialBuffer());
int* lengths = reinterpret_cast<int*>(classesRangesLens.specialBuffer());
if (input->isVector()) {
unsortedSegmentSumLinearKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), indices->specialBuffer(), indices->specialShapeInfo(), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo());
}
else {
output->assign(0);
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
dims.x = input->sizeAt(0);
segmentSumTadKernel<T,I><<<dims.x, dims.y, dims.z, *stream>>>(input->specialBuffer(), input->specialShapeInfo(), inputTads, inputTadOffsets, reinterpret_cast<I*>(indices->specialBuffer()), begins, lengths, numOfClasses, output->specialBuffer(), output->specialShapeInfo(), outputTads, outputTadOffsets);
}
}
// -------------------------------------------------------------------------------------------------------------- //
void unsortedSegmentSumFunctor(sd::LaunchContext* context , NDArray* input, NDArray* indices, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices});
output->nullify();
BUILD_DOUBLE_SELECTOR(input->dataType(), indices->dataType(), unsortedSegmentSumFunctor_, (context, input, indices, numOfClasses, output),
NUMERIC_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices});
}
// -------------------------------------------------------------------------------------------------------------- //
// Backpropagate ops
// -------------------------------------------------------------------------------------------------------------- //
// Sorted sum backpropagate
template <typename T, typename I>
static __global__ void segmentSumBPLinearKernel(
const void* inputBuf, const Nd4jLong* inputShape,
const void* eps, const Nd4jLong* epsShape,
const void* indicesBuf, const Nd4jLong* indicesShape,
void* outputBuf, const Nd4jLong* outputShape) {
auto x = reinterpret_cast<const T*>(inputBuf);
auto y = reinterpret_cast<const I*>(indicesBuf);
auto z = reinterpret_cast<T*>(outputBuf);
auto gradOut = reinterpret_cast<const T*>(eps);
__shared__ Nd4jLong xLen, gradLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
gradLen = shape::length(epsShape);
}
__syncthreads();
auto start = blockIdx.x * blockDim.x + threadIdx.x;
auto step = gridDim.x * blockDim.x;
for (auto e = start; e < xLen; e += step) {
auto zOffset = shape::getIndexOffset(e, outputShape);
auto xOffset = shape::getIndexOffset(e, inputShape);
auto yOffset = shape::getIndexOffset(e, indicesShape);
auto classIndex = y[yOffset];
auto gradOffsetO = shape::getIndexOffset(classIndex, epsShape);
z[zOffset] = gradOut[gradOffsetO];
}
}
// -------------------------------------------------------------------------------------------------------------- //
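// Each block processes one entry i of the indices array and copies the gradient TAD of
// segment indices[i] element-wise into the i-th output TAD.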
template <typename T, typename I>
static __global__ void segmentSumBPTadKernel(
const void* inputBuf, const Nd4jLong* inputShape,
const void* eps, const Nd4jLong* epsShape,
const void* indicesBuf, const Nd4jLong* indicesShape,
void* outputBuf, const Nd4jLong* outputShape,
const Nd4jLong* inputTad, const Nd4jLong* inputOffsets,
const Nd4jLong* gradOutTad, const Nd4jLong* gradOutOffsets,
const Nd4jLong* outTad, const Nd4jLong* outOffsets) {
__shared__ const T* x;
__shared__ const T* gradOut;
__shared__ const I* y;
__shared__ T* z;
__shared__ Nd4jLong xLen, yLen, gradLen, currentLen;
if (threadIdx.x == 0) {
xLen = shape::length(inputShape);
x = reinterpret_cast<const T*>(inputBuf);
y = reinterpret_cast<const I*>(indicesBuf);
z = reinterpret_cast<T*>(outputBuf);
yLen = shape::length(indicesShape);
gradOut = reinterpret_cast<const T*>(eps);
gradLen = shape::length(epsShape);
currentLen = shape::length(outTad);
}
__syncthreads();
for (auto i = blockIdx.x; i < yLen; i += gridDim.x) {
auto yIndex = shape::getIndexOffset(i, indicesShape);
auto segment = y[yIndex];
auto currentOut = z + outOffsets[i];
auto outGrad = gradOut + gradOutOffsets[segment];
for (auto e = threadIdx.x; e < currentLen; e += blockDim.x) {
currentOut[e] = outGrad[e];
}
}
}
// -------------------------------------------------------------------------------------------------------------- //
template <typename T, typename I>
int segmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
auto gradOutTads = packGradOut.specialShapeInfo();
auto gradOutTadOffsets = packGradOut.specialOffsets();
segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int segmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return segmentSumFunctorBP_, (context, input,
indices, gradOut, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
template <typename T, typename I>
static int unsortedSegmentSumFunctorBP_(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
auto stream = context->getCudaStream();
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
if (input->isVector()) {
Nd4jLong loop_size = input->lengthOf();
auto numOfClasses = gradOut->lengthOf(); //indices->e<Nd4jLong>(loop_size - 1);
segmentSumBPLinearKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(),
input->specialShapeInfo(), gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo());
}
else {
std::vector<int> dimensions = ShapeUtils::evalDimsToExclude(input->rankOf(), {0});
auto packX = sd::ConstantTadHelper::getInstance().tadForDimensions(input->shapeInfo(), dimensions);
auto packZ = sd::ConstantTadHelper::getInstance().tadForDimensions(output->shapeInfo(), dimensions);
auto packGradOut = sd::ConstantTadHelper::getInstance().tadForDimensions(gradOut->shapeInfo(), dimensions);
auto inputTads = packX.specialShapeInfo();
auto inputTadOffsets = packX.specialOffsets();
auto outputTads = packZ.specialShapeInfo();
auto outputTadOffsets = packZ.specialOffsets();
auto gradOutTads = packGradOut.specialShapeInfo();
auto gradOutTadOffsets = packGradOut.specialOffsets();
segmentSumBPTadKernel<T,I><<<gradOut->lengthOf(), input->lengthOf(), 256, *stream>>>(input->specialBuffer(), input->specialShapeInfo(),
gradOut->specialBuffer(), gradOut->specialShapeInfo(),
indices->specialBuffer(), indices->specialShapeInfo(), output->specialBuffer(), output->specialShapeInfo(),
inputTads, inputTadOffsets, gradOutTads, gradOutTadOffsets,
outputTads, outputTadOffsets);
}
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
return Status::OK();
}
// -------------------------------------------------------------------------------------------------------------- //
int unsortedSegmentSumFunctorBP(sd::LaunchContext* context , NDArray* input, NDArray* indices, NDArray* gradOut, Nd4jLong numOfClasses, NDArray* output) {
NDArray::prepareSpecialUse({output}, {input, indices, gradOut});
BUILD_DOUBLE_SELECTOR(output->dataType(), indices->dataType(), return unsortedSegmentSumFunctorBP_, (context, input, indices, gradOut, numOfClasses, output), FLOAT_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({output}, {input, indices, gradOut});
}
}
}
} |
69f10d2af8879d9a69a11781c7e5fda2d1c20792.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/psroi_pooling_v2_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__global__ void PSROIPoolInitKernel(size_t size_init, T *input) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size_init;
thread_idx += blockDim.x * gridDim.x) {
input[thread_idx] = static_cast<T>(.0);
}
}
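// Backward pass of position-sensitive ROI pooling: each thread takes one element of the
// pooled-output gradient, recomputes the ROI bin it belongs to (ROI coordinates scaled by
// spatial_scale, rounded and clipped to the feature map) and scatters
// input_diff[index] / bin_area into that bin's region of output_diff via MsAtomicAdd.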
template <typename T>
__global__ void PSROIPoolBackwardV2(const int nthreads, T *input_diff, const T spatial_scale, const int feature_height,
const int feature_width, const int feature_channels, const int pooled_height,
const int pooled_width, const int output_channels, T *output_diff, T *roi_boxes,
int batch_size, int rois_num, int group_size) {
const int elements_per_roi_box = 5;
// Loop over the outputs of the forward operator.
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) {
int width_offset_n = index % pooled_width;
int height_offset_n = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_channels;
int n_batch = n / rois_num;
int n_rois_num = n % rois_num;
// find pooling box index
T *p_roi_batch_index = roi_boxes + n_batch * (rois_num * elements_per_roi_box) + n_rois_num;
int roi_batch_index = static_cast<int>(*p_roi_batch_index);
T *p_roi_start_width = p_roi_batch_index + rois_num;
T roi_start_width = static_cast<T>(roundf((*p_roi_start_width) * spatial_scale));
T *p_roi_start_height = p_roi_start_width + rois_num;
T roi_start_height = static_cast<T>(roundf((*p_roi_start_height) * spatial_scale));
T *p_roi_end_width = p_roi_start_height + rois_num;
T roi_end_width = static_cast<T>(roundf((*p_roi_end_width) * spatial_scale));
T *p_roi_end_height = p_roi_end_width + rois_num;
T roi_end_height = static_cast<T>(roundf((*p_roi_end_height) * spatial_scale));
// force the ROI width and height to be at least 0.1
T roi_width = max(roi_end_width - roi_start_width, 0.1);
T roi_height = max(roi_end_height - roi_start_height, 0.1);
// Compute bin_width and bin_height
T bin_height = roi_height / static_cast<T>(pooled_height);
T bin_width = roi_width / static_cast<T>(pooled_width);
// compute pooling area's position
int pooling_start_x = floor(static_cast<float>(static_cast<T>(height_offset_n) * bin_height + roi_start_height));
int pooling_start_y = floor(static_cast<float>(static_cast<T>(width_offset_n) * bin_width + roi_start_width));
int pooling_end_x = ceil(static_cast<float>(static_cast<T>(height_offset_n + 1) * bin_height + roi_start_height));
int pooling_end_y = ceil(static_cast<float>(static_cast<T>(width_offset_n + 1) * bin_width + roi_start_width));
// Add roi offsets and clip to input boundaries
pooling_start_x = min(max(pooling_start_x, 0), feature_height);
pooling_end_x = min(max(pooling_end_x, 0), feature_height);
pooling_start_y = min(max(pooling_start_y, 0), feature_width);
pooling_end_y = min(max(pooling_end_y, 0), feature_width);
bool is_empty = (pooling_end_x <= pooling_start_x) || (pooling_end_y <= pooling_start_y);
int c = index % (pooled_height * pooled_width * output_channels);
T *offset_bottom_diff = output_diff + (roi_batch_index * feature_channels + c) * feature_height * feature_width;
T bin_area = (pooling_end_x - pooling_start_x) * (pooling_end_y - pooling_start_y);
T diff_val = is_empty ? T(0.) : input_diff[index] / bin_area;
for (int h = pooling_start_x; h < pooling_end_x; ++h) {
for (int w = pooling_start_y; w < pooling_end_y; ++w) {
int bottom_index = h * feature_width + w;
MsAtomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
template <typename T>
void PSROIPoolBackwardV2Launcher(T *input_diff, const int batch_size, const int output_n, const T spatial_scale,
const int feature_channels, const int feature_height, const int feature_width,
const int pooled_width, const int pooled_height, const int output_channels,
T *output_diff, T *roi_boxes, hipStream_t stream, int rois_num, int group_size) {
size_t size_init = batch_size * feature_channels * feature_height * feature_width;
hipLaunchKernelGGL(( PSROIPoolInitKernel), dim3(GET_BLOCKS(size_init)), dim3(GET_THREADS), 0, stream, size_init, output_diff);
const int kThreadsPerBlock_ = 1024;
const int output_size = output_channels * pooled_height * pooled_width * output_n;
hipError_t err;
hipLaunchKernelGGL(( PSROIPoolBackwardV2), dim3((output_size + kThreadsPerBlock_ - 1) / kThreadsPerBlock_), dim3(kThreadsPerBlock_), 0, stream,
output_size, input_diff, spatial_scale, feature_height, feature_width, feature_channels, pooled_height,
pooled_width, output_channels, output_diff, roi_boxes, batch_size, rois_num, group_size);
err = hipGetLastError();
if (hipSuccess != err) {
fprintf(stderr, "cudaCheckError() failed : %s\n", hipGetErrorString(err));
exit(-1);
}
}
template CUDA_LIB_EXPORT void PSROIPoolBackwardV2Launcher<float>(
float *input_diff, const int batch_size, const int output_n, const float spatial_scale, const int feature_channels,
const int feature_height, const int feature_width, const int pooled_width, const int pooled_height,
const int output_channels, float *output_diff, float *roi_boxes, hipStream_t stream, int rois_num, int group_size);
template CUDA_LIB_EXPORT void PSROIPoolBackwardV2Launcher<half>(
half *input_diff, const int batch_size, const int output_n, const half spatial_scale, const int feature_channels,
const int feature_height, const int feature_width, const int pooled_width, const int pooled_height,
const int output_channels, half *output_diff, half *roi_boxes, hipStream_t stream, int rois_num, int group_size);
| 69f10d2af8879d9a69a11781c7e5fda2d1c20792.cu | /**
* Copyright 2021 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <algorithm>
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/psroi_pooling_v2_impl.cuh"
#include "plugin/device/gpu/kernel/cuda_impl/cuda_ops/util.cuh"
template <typename T>
__global__ void PSROIPoolInitKernel(size_t size_init, T *input) {
for (int thread_idx = blockIdx.x * blockDim.x + threadIdx.x; thread_idx < size_init;
thread_idx += blockDim.x * gridDim.x) {
input[thread_idx] = static_cast<T>(.0);
}
}
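// Backward pass of position-sensitive ROI pooling: each thread takes one element of the
// pooled-output gradient, recomputes the ROI bin it belongs to (ROI coordinates scaled by
// spatial_scale, rounded and clipped to the feature map) and scatters
// input_diff[index] / bin_area into that bin's region of output_diff via MsAtomicAdd.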
template <typename T>
__global__ void PSROIPoolBackwardV2(const int nthreads, T *input_diff, const T spatial_scale, const int feature_height,
const int feature_width, const int feature_channels, const int pooled_height,
const int pooled_width, const int output_channels, T *output_diff, T *roi_boxes,
int batch_size, int rois_num, int group_size) {
const int elements_per_roi_box = 5;
// Loop over the outputs of forward operator.
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < nthreads; index += blockDim.x * gridDim.x) {
int width_offset_n = index % pooled_width;
int height_offset_n = (index / pooled_width) % pooled_height;
int n = index / pooled_width / pooled_height / output_channels;
int n_batch = n / rois_num;
int n_rois_num = n % rois_num;
// find pooling box index
T *p_roi_batch_index = roi_boxes + n_batch * (rois_num * elements_per_roi_box) + n_rois_num;
int roi_batch_index = static_cast<int>(*p_roi_batch_index);
T *p_roi_start_width = p_roi_batch_index + rois_num;
T roi_start_width = static_cast<T>(roundf((*p_roi_start_width) * spatial_scale));
T *p_roi_start_height = p_roi_start_width + rois_num;
T roi_start_height = static_cast<T>(roundf((*p_roi_start_height) * spatial_scale));
T *p_roi_end_width = p_roi_start_height + rois_num;
T roi_end_width = static_cast<T>(roundf((*p_roi_end_width) * spatial_scale));
T *p_roi_end_height = p_roi_end_width + rois_num;
T roi_end_height = static_cast<T>(roundf((*p_roi_end_height) * spatial_scale));
// force the ROI width and height to be at least 0.1
T roi_width = max(roi_end_width - roi_start_width, 0.1);
T roi_height = max(roi_end_height - roi_start_height, 0.1);
// Compute bin_width and bin_height
T bin_height = roi_height / static_cast<T>(pooled_height);
T bin_width = roi_width / static_cast<T>(pooled_width);
// compute pooling area's position
int pooling_start_x = floor(static_cast<float>(static_cast<T>(height_offset_n) * bin_height + roi_start_height));
int pooling_start_y = floor(static_cast<float>(static_cast<T>(width_offset_n) * bin_width + roi_start_width));
int pooling_end_x = ceil(static_cast<float>(static_cast<T>(height_offset_n + 1) * bin_height + roi_start_height));
int pooling_end_y = ceil(static_cast<float>(static_cast<T>(width_offset_n + 1) * bin_width + roi_start_width));
// Add roi offsets and clip to input boundaries
pooling_start_x = min(max(pooling_start_x, 0), feature_height);
pooling_end_x = min(max(pooling_end_x, 0), feature_height);
pooling_start_y = min(max(pooling_start_y, 0), feature_width);
pooling_end_y = min(max(pooling_end_y, 0), feature_width);
bool is_empty = (pooling_end_x <= pooling_start_x) || (pooling_end_y <= pooling_start_y);
int c = index % (pooled_height * pooled_width * output_channels);
T *offset_bottom_diff = output_diff + (roi_batch_index * feature_channels + c) * feature_height * feature_width;
T bin_area = (pooling_end_x - pooling_start_x) * (pooling_end_y - pooling_start_y);
T diff_val = is_empty ? T(0.) : input_diff[index] / bin_area;
for (int h = pooling_start_x; h < pooling_end_x; ++h) {
for (int w = pooling_start_y; w < pooling_end_y; ++w) {
int bottom_index = h * feature_width + w;
MsAtomicAdd(offset_bottom_diff + bottom_index, diff_val);
}
}
}
}
template <typename T>
void PSROIPoolBackwardV2Launcher(T *input_diff, const int batch_size, const int output_n, const T spatial_scale,
const int feature_channels, const int feature_height, const int feature_width,
const int pooled_width, const int pooled_height, const int output_channels,
T *output_diff, T *roi_boxes, cudaStream_t stream, int rois_num, int group_size) {
size_t size_init = batch_size * feature_channels * feature_height * feature_width;
PSROIPoolInitKernel<<<GET_BLOCKS(size_init), GET_THREADS, 0, stream>>>(size_init, output_diff);
const int kThreadsPerBlock_ = 1024;
const int output_size = output_channels * pooled_height * pooled_width * output_n;
cudaError_t err;
PSROIPoolBackwardV2<<<(output_size + kThreadsPerBlock_ - 1) / kThreadsPerBlock_, kThreadsPerBlock_, 0, stream>>>(
output_size, input_diff, spatial_scale, feature_height, feature_width, feature_channels, pooled_height,
pooled_width, output_channels, output_diff, roi_boxes, batch_size, rois_num, group_size);
err = cudaGetLastError();
if (cudaSuccess != err) {
fprintf(stderr, "cudaCheckError() failed : %s\n", cudaGetErrorString(err));
exit(-1);
}
}
template CUDA_LIB_EXPORT void PSROIPoolBackwardV2Launcher<float>(
float *input_diff, const int batch_size, const int output_n, const float spatial_scale, const int feature_channels,
const int feature_height, const int feature_width, const int pooled_width, const int pooled_height,
const int output_channels, float *output_diff, float *roi_boxes, cudaStream_t stream, int rois_num, int group_size);
template CUDA_LIB_EXPORT void PSROIPoolBackwardV2Launcher<half>(
half *input_diff, const int batch_size, const int output_n, const half spatial_scale, const int feature_channels,
const int feature_height, const int feature_width, const int pooled_width, const int pooled_height,
const int output_channels, half *output_diff, half *roi_boxes, cudaStream_t stream, int rois_num, int group_size);
|
8f1aba846be6d35d5fcecff87ce6e385a14b9a68.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top;
int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top;
int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top;
int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top;
int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_top*(y)+xdim0_update_halo_kernel2_zvel_plus_2_top*ydim0_update_halo_kernel2_zvel_plus_2_top*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_top*(y)+xdim1_update_halo_kernel2_zvel_plus_2_top*ydim1_update_halo_kernel2_zvel_plus_2_top*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_top_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,-2,0)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,-2,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_2_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) {
hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0;
hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0;
hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1;
hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_top), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
}
ops_enqueue_kernel(desc);
}
#endif
| 8f1aba846be6d35d5fcecff87ce6e385a14b9a68.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel2_zvel_plus_2_top;
int xdim0_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int ydim0_update_halo_kernel2_zvel_plus_2_top;
int ydim0_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int xdim1_update_halo_kernel2_zvel_plus_2_top;
int xdim1_update_halo_kernel2_zvel_plus_2_top_h = -1;
__constant__ int ydim1_update_halo_kernel2_zvel_plus_2_top;
int ydim1_update_halo_kernel2_zvel_plus_2_top_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_top*(y)+xdim0_update_halo_kernel2_zvel_plus_2_top*ydim0_update_halo_kernel2_zvel_plus_2_top*(z))
#define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_top*(y)+xdim1_update_halo_kernel2_zvel_plus_2_top*ydim1_update_halo_kernel2_zvel_plus_2_top*(z))
//user function
__device__
inline void update_halo_kernel2_zvel_plus_2_top_gpu(double *zvel0, double *zvel1, const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,-2,0)];
if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,-2,0)];
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel2_zvel_plus_2_top(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_top + idx_z * 1*1 * xdim0_update_halo_kernel2_zvel_plus_2_top * ydim0_update_halo_kernel2_zvel_plus_2_top;
arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_top + idx_z * 1*1 * xdim1_update_halo_kernel2_zvel_plus_2_top * ydim1_update_halo_kernel2_zvel_plus_2_top;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel2_zvel_plus_2_top_gpu(arg0, arg1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,51)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
OPS_kernels[51].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned) return;
for ( int n=0; n<3; n++ ){
start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n];
if (start[n] >= range[2*n]) {
start[n] = 0;
}
else {
start[n] = range[2*n] - start[n];
}
if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n];
if (end[n] >= range[2*n+1]) {
end[n] = range[2*n+1] - sb->decomp_disp[n];
}
else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n]))
end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]);
}
#else
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_top_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_top_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_top_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_top_h) {
cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_top, &xdim0, sizeof(int) );
xdim0_update_halo_kernel2_zvel_plus_2_top_h = xdim0;
cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_top, &ydim0, sizeof(int) );
ydim0_update_halo_kernel2_zvel_plus_2_top_h = ydim0;
cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_top, &xdim1, sizeof(int) );
xdim1_update_halo_kernel2_zvel_plus_2_top_h = xdim1;
cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_top, &ydim1, sizeof(int) );
ydim1_update_halo_kernel2_zvel_plus_2_top_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_plus_2_top<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[51].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[51].mpi_time += t2-t1;
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[51].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_plus_2_top(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 51;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 51;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_plus_2_top_execute;
if (OPS_diags > 1) {
ops_timing_realloc(51,"update_halo_kernel2_zvel_plus_2_top");
}
ops_enqueue_kernel(desc);
}
#endif
|
344b6e2e9fe96ec4d2b451b6695b8181b497b891.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
__global__ void orcu_kernel12113(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ysum;
int j, k, col, row;
for (int i=tid; i<=nrows-1; i+=gsize) {
{
ysum=0.0;
for (j=0; j<=ndiags-1; j++ ) {
row=i+j*sbdiag;
col=(floor((float)i/ndofs)+offsets[j])*ndofs;
if (col>=0&&col<nrows)
for (k=0; k<=ndofs-1; k++ )
ysum=ysum+A[row+k*nrows]*x[col+k];
}
y[i]=ysum;
}
}
}
void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) {
register int i,j,k;
int col,row;
double ysum;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,105,14);
param PL[] = [16,32,48];
}
def input_params {
param M[] = [64];
param N[] = [64];
param P[] = [64];
param NOS = 7;
param DOF[] = range(1,17);
constraint c1 = (M==N);
constraint c2 = (N==P);
}
def input_vars {
decl dynamic double A[M*N*P*DOF*DOF*NOS] = random;
decl dynamic double x[M*N*P*DOF] = random;
decl dynamic double y[M*N*P*DOF] = 0;
decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF};
}
) @*/
/**-- (Generated by Orio)
Best performance cost:
[3.0666899999999999, 3.04982, 3.04637, 3.0364200000000001, 3.0546899999999999]
Tuned for specific problem sizes:
DOF = 6
M = 64
N = 64
NOS = 7
P = 64
Best performance parameters:
BC = 84
PL = 32
TC = 768
--**/
int nrows=M*N*P*DOF;
int ndiags=NOS;
int ndofs=DOF;
int sbdiag=M*N*P*DOF*DOF;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
for(i=0; i<=nrows-1; i++){
ysum = 0.0;
for(j=0; j<=ndiags-1; j++){
row = i+j*sbdiag;
col = (floor((float)i/ndofs)+offsets[j])*ndofs;
if(col>=0&&col<nrows)
for(k=0; k<=ndofs-1; k++)
ysum += A[row+k*nrows] * x[col+k];
}
y[i] = ysum;
}
) @*/
{
hipDeviceSynchronize();
/*declare variables*/
double *dev_A, *dev_x, *dev_y;
int *dev_offsets;
int nthreads=768;
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=84;
/*allocate device memory*/
hipMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
hipMalloc(&dev_x,M *N *P *DOF*sizeof(double));
hipMalloc(&dev_y,M *N *P *DOF*sizeof(double));
hipMalloc(&dev_offsets,NOS*sizeof(int));
hipDeviceSetCacheConfig(hipFuncCachePreferEqual);
/*copy data from host to device*/
hipEventRecord(tstart,0);
hipMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(dev_offsets,offsets,NOS*sizeof(int),hipMemcpyHostToDevice);
hipEventRecord(tstop,0);
hipEventSynchronize(tstop);
hipEventElapsedTime(&orcu_transfer,tstart,tstop);
hipEventRecord(start,0);
/*invoke device kernel*/
hipLaunchKernelGGL(( orcu_kernel12113), dim3(dimGrid),dim3(dimBlock), 0, 0, nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
hipEventRecord(stop,0);
hipEventSynchronize(stop);
hipEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
hipMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),hipMemcpyDeviceToHost);
hipDeviceSetCacheConfig(hipFuncCachePreferNone);
/*free allocated memory*/
hipFree(dev_A);
hipFree(dev_x);
hipFree(dev_y);
hipFree(dev_offsets);
hipError_t err=hipGetLastError();
if (hipSuccess!=err)
printf("CUDA runtime error: %s@",hipGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
| 344b6e2e9fe96ec4d2b451b6695b8181b497b891.cu | __global__ void orcu_kernel12113(const int nrows, const int ndiags, int sbdiag, int ndofs, int* offsets, double* A, double* x, double* y) {
const int tid=blockIdx.x*blockDim.x+threadIdx.x;
const int gsize=gridDim.x*blockDim.x;
double ysum;
int j, k, col, row;
for (int i=tid; i<=nrows-1; i+=gsize) {
{
ysum=0.0;
for (j=0; j<=ndiags-1; j++ ) {
row=i+j*sbdiag;
col=(floor((float)i/ndofs)+offsets[j])*ndofs;
if (col>=0&&col<nrows)
for (k=0; k<=ndofs-1; k++ )
ysum=ysum+A[row+k*nrows]*x[col+k];
}
y[i]=ysum;
}
}
}
void MatMult_SeqDIA(double* A, double* x, double* y, int M, int N, int P, int NOS, int DOF) {
register int i,j,k;
int col,row;
double ysum;
/*@ begin PerfTuning (
def performance_params {
param TC[] = range(32,1025,32);
param BC[] = range(14,105,14);
param PL[] = [16,32,48];
}
def input_params {
param M[] = [64];
param N[] = [64];
param P[] = [64];
param NOS = 7;
param DOF[] = range(1,17);
constraint c1 = (M==N);
constraint c2 = (N==P);
}
def input_vars {
decl dynamic double A[M*N*P*DOF*DOF*NOS] = random;
decl dynamic double x[M*N*P*DOF] = random;
decl dynamic double y[M*N*P*DOF] = 0;
decl static int offsets[NOS] = {-M*N*DOF,-M*DOF,-DOF,0,DOF,M*DOF,M*N*DOF};
}
) @*/
/**-- (Generated by Orio)
Best performance cost:
[3.0666899999999999, 3.04982, 3.04637, 3.0364200000000001, 3.0546899999999999]
Tuned for specific problem sizes:
DOF = 6
M = 64
N = 64
NOS = 7
P = 64
Best performance parameters:
BC = 84
PL = 32
TC = 768
--**/
int nrows=M*N*P*DOF;
int ndiags=NOS;
int ndofs=DOF;
int sbdiag=M*N*P*DOF*DOF;
/*@ begin Loop(transform CUDA(threadCount=TC, blockCount=BC, preferL1Size=PL)
for(i=0; i<=nrows-1; i++){
ysum = 0.0;
for(j=0; j<=ndiags-1; j++){
row = i+j*sbdiag;
col = (floor((float)i/ndofs)+offsets[j])*ndofs;
if(col>=0&&col<nrows)
for(k=0; k<=ndofs-1; k++)
ysum += A[row+k*nrows] * x[col+k];
}
y[i] = ysum;
}
) @*/
{
cudaDeviceSynchronize();
    /*declare variables*/
    double *dev_A, *dev_x, *dev_y;
    int *dev_offsets;
    int nthreads=768;
    /*timing events and elapsed-time accumulators for the transfer/kernel measurements below*/
    cudaEvent_t tstart, tstop, start, stop;
    float orcu_transfer, orcu_elapsed;
    cudaEventCreate(&tstart); cudaEventCreate(&tstop);
    cudaEventCreate(&start); cudaEventCreate(&stop);
/*calculate device dimensions*/
dim3 dimGrid, dimBlock;
dimBlock.x=nthreads;
dimGrid.x=84;
/*allocate device memory*/
cudaMalloc(&dev_A,M *N *P *DOF *DOF *NOS*sizeof(double));
cudaMalloc(&dev_x,M *N *P *DOF*sizeof(double));
cudaMalloc(&dev_y,M *N *P *DOF*sizeof(double));
cudaMalloc(&dev_offsets,NOS*sizeof(int));
cudaDeviceSetCacheConfig(cudaFuncCachePreferEqual);
/*copy data from host to device*/
cudaEventRecord(tstart,0);
cudaMemcpy(dev_A,A,M *N *P *DOF *DOF *NOS*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_x,x,M *N *P *DOF*sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(dev_offsets,offsets,NOS*sizeof(int),cudaMemcpyHostToDevice);
cudaEventRecord(tstop,0);
cudaEventSynchronize(tstop);
cudaEventElapsedTime(&orcu_transfer,tstart,tstop);
cudaEventRecord(start,0);
/*invoke device kernel*/
orcu_kernel12113<<<dimGrid,dimBlock>>>(nrows,ndiags,sbdiag,ndofs,dev_offsets,dev_A,dev_x,dev_y);
cudaEventRecord(stop,0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&orcu_elapsed,start,stop);
/*copy data from device to host*/
cudaMemcpy(y,dev_y,M *N *P *DOF*sizeof(double),cudaMemcpyDeviceToHost);
cudaDeviceSetCacheConfig(cudaFuncCachePreferNone);
/*free allocated memory*/
cudaFree(dev_A);
cudaFree(dev_x);
cudaFree(dev_y);
cudaFree(dev_offsets);
cudaError_t err=cudaGetLastError();
if (cudaSuccess!=err)
printf("CUDA runtime error: %s@",cudaGetErrorString(err));
}
/*@ end @*/
/*@ end @*/
}
|
cb543840d8cdcba1e2696ae6d8af9be3b0f25072.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel5_plus_2_left [3][2];
static int dims_update_halo_kernel5_plus_2_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel5_plus_2_left_gpu(ACC<double> &vol_flux_z,
ACC<double> &mass_flux_z,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = (vol_flux_z(2,0,0));
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = (mass_flux_z(2,0,0));
}
__global__ void ops_update_halo_kernel5_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_2_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_2_left[0][0] * dims_update_halo_kernel5_plus_2_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_2_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_2_left[1][0] * dims_update_halo_kernel5_plus_2_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel5_plus_2_left[0][0], dims_update_halo_kernel5_plus_2_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel5_plus_2_left[1][0], dims_update_halo_kernel5_plus_2_left[1][1], arg1);
update_halo_kernel5_plus_2_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel5_plus_2_left_h[0][0] || ydim0 != dims_update_halo_kernel5_plus_2_left_h[0][1] || xdim1 != dims_update_halo_kernel5_plus_2_left_h[1][0] || ydim1 != dims_update_halo_kernel5_plus_2_left_h[1][1]) {
dims_update_halo_kernel5_plus_2_left_h[0][0] = xdim0;
dims_update_halo_kernel5_plus_2_left_h[0][1] = ydim0;
dims_update_halo_kernel5_plus_2_left_h[1][0] = xdim1;
dims_update_halo_kernel5_plus_2_left_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel5_plus_2_left, dims_update_halo_kernel5_plus_2_left_h, sizeof(dims_update_halo_kernel5_plus_2_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_2_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
| cb543840d8cdcba1e2696ae6d8af9be3b0f25072.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel5_plus_2_left [3][2];
static int dims_update_halo_kernel5_plus_2_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel5_plus_2_left_gpu(ACC<double> &vol_flux_z,
ACC<double> &mass_flux_z,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = (vol_flux_z(2,0,0));
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = (mass_flux_z(2,0,0));
}
__global__ void ops_update_halo_kernel5_plus_2_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_2_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_2_left[0][0] * dims_update_halo_kernel5_plus_2_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_2_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_2_left[1][0] * dims_update_halo_kernel5_plus_2_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel5_plus_2_left[0][0], dims_update_halo_kernel5_plus_2_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel5_plus_2_left[1][0], dims_update_halo_kernel5_plus_2_left[1][1], arg1);
update_halo_kernel5_plus_2_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_2_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,89)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
OPS_kernels[89].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel5_plus_2_left_h[0][0] || ydim0 != dims_update_halo_kernel5_plus_2_left_h[0][1] || xdim1 != dims_update_halo_kernel5_plus_2_left_h[1][0] || ydim1 != dims_update_halo_kernel5_plus_2_left_h[1][1]) {
dims_update_halo_kernel5_plus_2_left_h[0][0] = xdim0;
dims_update_halo_kernel5_plus_2_left_h[0][1] = ydim0;
dims_update_halo_kernel5_plus_2_left_h[1][0] = xdim1;
dims_update_halo_kernel5_plus_2_left_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel5_plus_2_left, dims_update_halo_kernel5_plus_2_left_h, sizeof(dims_update_halo_kernel5_plus_2_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_2_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[89].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[89].mpi_time += t2-t1;
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[89].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_2_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 89;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 89;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_2_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(89,"update_halo_kernel5_plus_2_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
e87ba8a4ea59aebfad336084f257f74a9565f12f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <random>
#include <thrust/device_vector.h>
#include "gtest/gtest.h"
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <../../src/hashmap/concurrent_unordered_map.cuh>
#include "../../src/groupby/hash/aggregation_operations.cuh"
// This is necessary to do a parametrized typed-test over multiple template arguments
template <typename Key, typename Value, template <typename> typename Aggregation_Operator>
struct KeyValueTypes
{
using key_type = Key;
using value_type = Value;
using op_type = Aggregation_Operator<value_type>;
};
// A new instance of this class will be created for each *TEST(MapTest, ...)
// Put all repeated stuff for each test here
template <class T>
struct MapTest : public testing::Test
{
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using op_type = typename T::op_type;
using map_type = concurrent_unordered_map<key_type, value_type, std::numeric_limits<key_type>::max()>;
using pair_type = thrust::pair<key_type, value_type>;
std::unique_ptr<map_type> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = op_type::IDENTITY;
const int size;
const int THREAD_BLOCK_SIZE{256};
std::vector<std::pair<key_type,value_type>> pairs;
thrust::device_vector<pair_type> d_pairs;
std::unordered_map<key_type, value_type> expected_values;
MapTest(const int hash_table_size = 10000)
: size(hash_table_size), the_map(new map_type(hash_table_size, op_type::IDENTITY))
{
}
pair_type * create_input(const int num_unique_keys, const int num_values_per_key, const int ratio = 2, const int max_key = RAND_MAX, const int max_value = RAND_MAX, bool shuffle = false)
{
const int TOTAL_PAIRS = num_unique_keys * num_values_per_key;
this->the_map.reset(new map_type(ratio*TOTAL_PAIRS, unused_value));
pairs.reserve(TOTAL_PAIRS);
// Always use the same seed so the random sequence is the same each time
std::srand(0);
for(int i = 0; i < num_unique_keys; ++i )
{
// Create random key
key_type current_key = std::rand() % max_key;
// Don't use unused_key
while(current_key == this->unused_key)
{
current_key = std::rand();
}
// For the current key, generate random values
for(int j = 0; j < num_values_per_key; ++j)
{
value_type current_value = std::rand() % max_value;
// Don't use unused_value
while(current_value == this->unused_value)
{
current_value = std::rand();
}
// Store current key and value
pairs.push_back(std::make_pair(current_key, current_value));
// Use a STL map to keep track of the max value for each key
auto found = expected_values.find(current_key);
// Key doesn't exist yet, insert it
if(found == expected_values.end())
{
expected_values.insert(std::make_pair(current_key,current_value));
}
// Key exists, update the value with the operator
else
{
op_type op;
value_type new_value = op(found->second, current_value);
found->second = new_value;
}
}
}
if(shuffle == true)
std::random_shuffle(pairs.begin(), pairs.end());
d_pairs = pairs;
return thrust::raw_pointer_cast(d_pairs.data());
}
void check_answer(){
for(auto const &k : this->expected_values)
{
key_type test_key = k.first;
value_type expected_value = k.second;
auto found = this->the_map->find(test_key);
ASSERT_NE(this->the_map->end(), found);
value_type test_value = found->second;
EXPECT_EQ(expected_value, test_value) << "Key is: " << test_key;
}
}
~MapTest(){
}
};
// Google Test can only do a parameterized typed-test over a single type, so we have
// to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
typedef ::testing::Types< KeyValueTypes<int, int, max_op>,
KeyValueTypes<int, float, max_op>,
KeyValueTypes<int, double, max_op>,
KeyValueTypes<int, long long int, max_op>,
KeyValueTypes<int, unsigned long long int, max_op>,
KeyValueTypes<unsigned long long int, int, max_op>,
KeyValueTypes<unsigned long long int, float, max_op>,
KeyValueTypes<unsigned long long int, double, max_op>,
KeyValueTypes<unsigned long long int, long long int, max_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, max_op>,
KeyValueTypes<int, int, min_op>,
KeyValueTypes<int, float, min_op>,
KeyValueTypes<int, double, min_op>,
KeyValueTypes<int, long long int, min_op>,
KeyValueTypes<int, unsigned long long int, min_op>,
KeyValueTypes<unsigned long long int, int, min_op>,
KeyValueTypes<unsigned long long int, float, min_op>,
KeyValueTypes<unsigned long long int, double, min_op>,
KeyValueTypes<unsigned long long int, long long int, min_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, min_op>
> Implementations;
TYPED_TEST_CASE(MapTest, Implementations);
TYPED_TEST(MapTest, InitialState)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin,end);
}
TYPED_TEST(MapTest, CheckUnusedValues){
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
TYPED_TEST(MapTest, AggregationTestHost)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
thrust::pair<key_type, value_type> first_pair{0,0};
thrust::pair<key_type, value_type> second_pair{0,10};
thrust::pair<key_type, value_type> third_pair{0,5};
auto max = [](value_type a, value_type b) { return (a >= b ? a : b); };
this->the_map->insert(first_pair, max);
auto found = this->the_map->find(0);
EXPECT_EQ(0, found->second);
this->the_map->insert(second_pair, max);
found = this->the_map->find(0);
EXPECT_EQ(10, found->second);
this->the_map->insert(third_pair, max);
found = this->the_map->find(0);
EXPECT_EQ(10, found->second);
this->the_map->insert(thrust::make_pair(0,11), max);
found = this->the_map->find(0);
EXPECT_EQ(11, found->second);
this->the_map->insert(thrust::make_pair(7, 42), max);
found = this->the_map->find(7);
EXPECT_EQ(42, found->second);
this->the_map->insert(thrust::make_pair(7, 62), max);
found = this->the_map->find(7);
EXPECT_EQ(62, found->second);
this->the_map->insert(thrust::make_pair(7, 42), max);
found = this->the_map->find(7);
EXPECT_EQ(62, found->second);
found = this->the_map->find(0);
EXPECT_EQ(11, found->second);
}
template<typename map_type, typename Aggregation_Operator>
__global__ void build_table(map_type * const the_map,
const typename map_type::value_type * const input_pairs,
const typename map_type::size_type input_size,
Aggregation_Operator op)
{
using size_type = typename map_type::size_type;
size_type i = threadIdx.x + blockIdx.x * blockDim.x;
while( i < input_size ){
the_map->insert(input_pairs[i], op);
i += blockDim.x * gridDim.x;
}
}
TYPED_TEST(MapTest, AggregationTestDeviceAllSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1, 1<<20);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceAllUnique)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<18, 1);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceWarpSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<15, 32);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceBlockSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<12, this->THREAD_BLOCK_SIZE);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
hipDeviceSynchronize();
hipLaunchKernelGGL(( build_table), dim3(grid_size), dim3(block_size), 0, 0, (this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
hipDeviceSynchronize();
this->check_answer();
}
int main(int argc, char * argv[]){
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
| e87ba8a4ea59aebfad336084f257f74a9565f12f.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cstdlib>
#include <iostream>
#include <vector>
#include <unordered_map>
#include <random>
#include <thrust/device_vector.h>
#include "gtest/gtest.h"
#include <gdf/gdf.h>
#include <gdf/cffi/functions.h>
#include <../../src/hashmap/concurrent_unordered_map.cuh>
#include "../../src/groupby/hash/aggregation_operations.cuh"
// This is necessary to do a parametrized typed-test over multiple template arguments
template <typename Key, typename Value, template <typename> typename Aggregation_Operator>
struct KeyValueTypes
{
using key_type = Key;
using value_type = Value;
using op_type = Aggregation_Operator<value_type>;
};
// A new instance of this class will be created for each *TEST(MapTest, ...)
// Put all repeated stuff for each test here
template <class T>
struct MapTest : public testing::Test
{
using key_type = typename T::key_type;
using value_type = typename T::value_type;
using op_type = typename T::op_type;
using map_type = concurrent_unordered_map<key_type, value_type, std::numeric_limits<key_type>::max()>;
using pair_type = thrust::pair<key_type, value_type>;
std::unique_ptr<map_type> the_map;
const key_type unused_key = std::numeric_limits<key_type>::max();
const value_type unused_value = op_type::IDENTITY;
const int size;
const int THREAD_BLOCK_SIZE{256};
std::vector<std::pair<key_type,value_type>> pairs;
thrust::device_vector<pair_type> d_pairs;
std::unordered_map<key_type, value_type> expected_values;
MapTest(const int hash_table_size = 10000)
: size(hash_table_size), the_map(new map_type(hash_table_size, op_type::IDENTITY))
{
}
pair_type * create_input(const int num_unique_keys, const int num_values_per_key, const int ratio = 2, const int max_key = RAND_MAX, const int max_value = RAND_MAX, bool shuffle = false)
{
const int TOTAL_PAIRS = num_unique_keys * num_values_per_key;
this->the_map.reset(new map_type(ratio*TOTAL_PAIRS, unused_value));
pairs.reserve(TOTAL_PAIRS);
// Always use the same seed so the random sequence is the same each time
std::srand(0);
for(int i = 0; i < num_unique_keys; ++i )
{
// Create random key
key_type current_key = std::rand() % max_key;
// Don't use unused_key
while(current_key == this->unused_key)
{
current_key = std::rand();
}
// For the current key, generate random values
for(int j = 0; j < num_values_per_key; ++j)
{
value_type current_value = std::rand() % max_value;
// Don't use unused_value
while(current_value == this->unused_value)
{
current_value = std::rand();
}
// Store current key and value
pairs.push_back(std::make_pair(current_key, current_value));
// Use a STL map to keep track of the max value for each key
auto found = expected_values.find(current_key);
// Key doesn't exist yet, insert it
if(found == expected_values.end())
{
expected_values.insert(std::make_pair(current_key,current_value));
}
// Key exists, update the value with the operator
else
{
op_type op;
value_type new_value = op(found->second, current_value);
found->second = new_value;
}
}
}
if(shuffle == true)
std::random_shuffle(pairs.begin(), pairs.end());
d_pairs = pairs;
return thrust::raw_pointer_cast(d_pairs.data());
}
void check_answer(){
for(auto const &k : this->expected_values)
{
key_type test_key = k.first;
value_type expected_value = k.second;
auto found = this->the_map->find(test_key);
ASSERT_NE(this->the_map->end(), found);
value_type test_value = found->second;
EXPECT_EQ(expected_value, test_value) << "Key is: " << test_key;
}
}
~MapTest(){
}
};
// Google Test can only do a parameterized typed-test over a single type, so we have
// to nest multiple types inside of the KeyValueTypes struct above
// KeyValueTypes<type1, type2> implies key_type = type1, value_type = type2
// This list is the types across which Google Test will run our tests
typedef ::testing::Types< KeyValueTypes<int, int, max_op>,
KeyValueTypes<int, float, max_op>,
KeyValueTypes<int, double, max_op>,
KeyValueTypes<int, long long int, max_op>,
KeyValueTypes<int, unsigned long long int, max_op>,
KeyValueTypes<unsigned long long int, int, max_op>,
KeyValueTypes<unsigned long long int, float, max_op>,
KeyValueTypes<unsigned long long int, double, max_op>,
KeyValueTypes<unsigned long long int, long long int, max_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, max_op>,
KeyValueTypes<int, int, min_op>,
KeyValueTypes<int, float, min_op>,
KeyValueTypes<int, double, min_op>,
KeyValueTypes<int, long long int, min_op>,
KeyValueTypes<int, unsigned long long int, min_op>,
KeyValueTypes<unsigned long long int, int, min_op>,
KeyValueTypes<unsigned long long int, float, min_op>,
KeyValueTypes<unsigned long long int, double, min_op>,
KeyValueTypes<unsigned long long int, long long int, min_op>,
KeyValueTypes<unsigned long long int, unsigned long long int, min_op>
> Implementations;
TYPED_TEST_CASE(MapTest, Implementations);
TYPED_TEST(MapTest, InitialState)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
auto begin = this->the_map->begin();
auto end = this->the_map->end();
EXPECT_NE(begin,end);
}
TYPED_TEST(MapTest, CheckUnusedValues){
EXPECT_EQ(this->the_map->get_unused_key(), this->unused_key);
auto begin = this->the_map->begin();
EXPECT_EQ(begin->first, this->unused_key);
EXPECT_EQ(begin->second, this->unused_value);
}
TYPED_TEST(MapTest, AggregationTestHost)
{
using key_type = typename TypeParam::key_type;
using value_type = typename TypeParam::value_type;
thrust::pair<key_type, value_type> first_pair{0,0};
thrust::pair<key_type, value_type> second_pair{0,10};
thrust::pair<key_type, value_type> third_pair{0,5};
auto max = [](value_type a, value_type b) { return (a >= b ? a : b); };
this->the_map->insert(first_pair, max);
auto found = this->the_map->find(0);
EXPECT_EQ(0, found->second);
this->the_map->insert(second_pair, max);
found = this->the_map->find(0);
EXPECT_EQ(10, found->second);
this->the_map->insert(third_pair, max);
found = this->the_map->find(0);
EXPECT_EQ(10, found->second);
this->the_map->insert(thrust::make_pair(0,11), max);
found = this->the_map->find(0);
EXPECT_EQ(11, found->second);
this->the_map->insert(thrust::make_pair(7, 42), max);
found = this->the_map->find(7);
EXPECT_EQ(42, found->second);
this->the_map->insert(thrust::make_pair(7, 62), max);
found = this->the_map->find(7);
EXPECT_EQ(62, found->second);
this->the_map->insert(thrust::make_pair(7, 42), max);
found = this->the_map->find(7);
EXPECT_EQ(62, found->second);
found = this->the_map->find(0);
EXPECT_EQ(11, found->second);
}
template<typename map_type, typename Aggregation_Operator>
__global__ void build_table(map_type * const the_map,
const typename map_type::value_type * const input_pairs,
const typename map_type::size_type input_size,
Aggregation_Operator op)
{
using size_type = typename map_type::size_type;
size_type i = threadIdx.x + blockIdx.x * blockDim.x;
while( i < input_size ){
the_map->insert(input_pairs[i], op);
i += blockDim.x * gridDim.x;
}
}
TYPED_TEST(MapTest, AggregationTestDeviceAllSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1, 1<<20);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceAllUnique)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<18, 1);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceWarpSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<15, 32);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
TYPED_TEST(MapTest, AggregationTestDeviceBlockSame)
{
using value_type = typename TypeParam::value_type;
using pair_type = typename MapTest<TypeParam>::pair_type;
using op_type = typename MapTest<TypeParam>::op_type;
pair_type * d_pairs = this->create_input(1<<12, this->THREAD_BLOCK_SIZE);
const dim3 grid_size ((this->d_pairs.size() + this->THREAD_BLOCK_SIZE -1) / this->THREAD_BLOCK_SIZE,1,1);
const dim3 block_size (this->THREAD_BLOCK_SIZE, 1, 1);
cudaDeviceSynchronize();
build_table<<<grid_size, block_size>>>((this->the_map).get(), d_pairs, this->d_pairs.size(), op_type());
cudaDeviceSynchronize();
this->check_answer();
}
int main(int argc, char * argv[]){
testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}
|
b2b334f41a7175791c11c4c128b4862baf8a57ae.hip | // !!! This is a file automatically generated by hipify!!!
#include <climits>
#include <cassert>
#include <chrono>
#include <sstream>
#include <fstream>
#include <omp.h>
#include "util/graph.h"
#include "util/log.h"
#include "util/pretty_print.h"
#include "util/timer.h"
#include "util/cuda/cuda_util.h"
#include "reordering_utils/reorder_utils.h"
#include "cuda_extern_functions.cuh"
#include "main_helper.h"
int main(int argc, char *argv[]) {
setlocale(LC_NUMERIC, "");
    if (argc < 3) {
        fprintf(stderr, "%s <Graph file> <Reorder Method> [Log File]\n", argv[0]);
exit(1);
}
ZLCUDAMemStat mem_stat;
ZLCUDATimer time_stat;
string algorithm_name;
string cmd(argv[0]);
auto pos = cmd.find_last_of('/');
if (pos != string::npos) {
algorithm_name = cmd.substr(pos + 1, cmd.size());
} else {
algorithm_name = cmd;
}
//set log file descriptor
#ifdef USE_LOG
FILE *log_f = nullptr;
if (argc >= 4) {
log_f = fopen(argv[3], "a+");
log_set_fp(log_f);
}
#endif
log_info("Algorithm name: %s", algorithm_name.c_str());
read_env();
graph_t g;
//load the graph from file
Graph yche_graph(argv[1]);
g.adj = yche_graph.edge_dst;
g.num_edges = yche_graph.node_off;
g.n = yche_graph.nodemax;
g.m = yche_graph.edgemax;
string reorder_method(argv[2]);
vector<int32_t> new_vid_dict;
vector<int32_t> old_vid_dict;
ReorderWrapper(g, argv[1], reorder_method, new_vid_dict, old_vid_dict);
/************ Compute k - truss *****************************************/
//edge list array
Timer get_eid_timer;
auto edgeIdToEdge = (Edge *) malloc(sizeof(Edge) * (g.m / 2));
auto EdgeSupport = (int *) malloc(sizeof(int) * (g.m / 2));
log_info("Malloc Time: %.9lf s", get_eid_timer.elapsed());
auto org_num_edges = g.m / 2;
get_eid_timer.reset();
//Populate the edge list array
getEidAndEdgeList(&g, edgeIdToEdge);
log_info("Init Eid Time: %.9lf s", get_eid_timer.elapsed());
get_eid_timer.reset();
auto max_omp_threads = omp_get_max_threads();
log_info("Max Threads: %d", max_omp_threads);
#pragma omp parallel for
for (auto i = 0; i < max_omp_threads; i++) {
auto avg = g.m / 2 / max_omp_threads;
auto iter_beg = avg * i;
auto iter_end = (i == max_omp_threads - 1) ? g.m / 2 : avg * (i + 1);
memset(EdgeSupport + iter_beg, 0, (iter_end - iter_beg) * sizeof(int));
}
log_info("Init EdgeSupport Time: %.9lf s", get_eid_timer.elapsed());
get_eid_timer.reset();
Timer global_timer;
int *EdgeSupportCUDA, *output, *final_result_output;
ZLCudaMalloc(&output, sizeof(int) * org_num_edges, &mem_stat);
ZLCudaMalloc(&final_result_output, sizeof(int) * org_num_edges, &mem_stat);
ZLCudaMalloc(&EdgeSupportCUDA, sizeof(int) * (g.m / 2), &mem_stat);
log_info("Malloc Time: %.9lfs", global_timer.elapsed());
#ifndef GPU_ONLY
eid_t *level_start_pos, *edge_offsets_level, *edge_off_org;
int *edge_sup;
Edge *edge_lst;
auto level = TrussDecompositionLevelsCPU(g, EdgeSupport, edgeIdToEdge,
level_start_pos, edge_offsets_level, edge_off_org,
edge_sup, edge_lst);
Timer offload_timer;
hipMemcpy(output, edge_offsets_level, sizeof(int) * org_num_edges, hipMemcpyHostToDevice);
hipMemcpy(EdgeSupportCUDA, edge_sup, sizeof(int) * g.m / 2, hipMemcpyHostToDevice);
assert(level_start_pos[level + 1] == 0);
log_info("Current g.m: %'lld", g.m / 2);
PKT_cuda(&g, edge_off_org, EdgeSupportCUDA, edge_lst,
100, output, level_start_pos, &mem_stat, &time_stat, level);
hipDeviceSynchronize();
log_info("Offloading Comp Time: %.9lfs", offload_timer.elapsed());
#else
auto level_start_pos = (eid_t *) calloc(MAX_LEVEL, sizeof(eid_t));
hipMemcpy(EdgeSupportCUDA, EdgeSupport, sizeof(int) * g.m / 2, hipMemcpyHostToDevice);
Timer tc_timer;
invoke_tc_bmp_gpu(&g, EdgeSupportCUDA);
extern double tc_time;
tc_time = tc_timer.elapsed();
PKT_cuda(&g, nullptr, EdgeSupportCUDA, edgeIdToEdge,
100, output, level_start_pos, &mem_stat, &time_stat, 0);
#endif
log_info("Parallel K-Truss: %.9lfs", global_timer.elapsed());
auto output_dir = string(argv[1]) + "/" + string("ktruss-") + algorithm_name + ".histogram";
log_info("Output Dir: %s", output_dir.c_str());
/*Recover the EdgeSupport for checking*/
Timer recovery_timer;
CheckLevelOff(level_start_pos);
#pragma omp parallel
for (int l = 0;; l++) {
auto start = level_start_pos[l];
auto end = level_start_pos[l + 1];
if (start == org_num_edges) break;
if (start > end) {
log_fatal("error in level stat pos: %d, [%d, %d)", l, start, end);
exit(-1);
}
#pragma omp for
for (int i = start; i < end; i++) {
final_result_output[output[i]] = l;
}
}
log_info("Finish Constructing Final Results..., Recovery Time: %.9lfs", recovery_timer.elapsed());
display_stats(final_result_output, org_num_edges, output_dir);
log_info("Finish Checking, ET: %.9lfs", recovery_timer.elapsed_and_reset());
//Free memory
free_graph(&g);
free(level_start_pos);
#ifndef GPU_ONLY
free(edge_offsets_level);
free(edge_off_org);
free(edge_sup);
free(edge_lst);
#endif
free(edgeIdToEdge);
free(EdgeSupport);
ZLCudaFree(output, &mem_stat);
ZLCudaFree(final_result_output, &mem_stat);
log_info("Free Time: %.9lfs", recovery_timer.elapsed());
#ifdef USE_LOG
if (log_f != nullptr) {
log_info("Flush File and Close...");
fflush(log_f);
fclose(log_f);
}
#endif
return 0;
}
| b2b334f41a7175791c11c4c128b4862baf8a57ae.cu | #include <climits>
#include <cassert>
#include <chrono>
#include <sstream>
#include <fstream>
#include <omp.h>
#include "util/graph.h"
#include "util/log.h"
#include "util/pretty_print.h"
#include "util/timer.h"
#include "util/cuda/cuda_util.h"
#include "reordering_utils/reorder_utils.h"
#include "cuda_extern_functions.cuh"
#include "main_helper.h"
int main(int argc, char *argv[]) {
setlocale(LC_NUMERIC, "");
    if (argc < 3) {
        fprintf(stderr, "%s <Graph file> <Reorder Method> [Log File]\n", argv[0]);
exit(1);
}
ZLCUDAMemStat mem_stat;
ZLCUDATimer time_stat;
string algorithm_name;
string cmd(argv[0]);
auto pos = cmd.find_last_of('/');
if (pos != string::npos) {
algorithm_name = cmd.substr(pos + 1, cmd.size());
} else {
algorithm_name = cmd;
}
//set log file descriptor
#ifdef USE_LOG
FILE *log_f = nullptr;
if (argc >= 4) {
log_f = fopen(argv[3], "a+");
log_set_fp(log_f);
}
#endif
log_info("Algorithm name: %s", algorithm_name.c_str());
read_env();
graph_t g;
//load the graph from file
Graph yche_graph(argv[1]);
g.adj = yche_graph.edge_dst;
g.num_edges = yche_graph.node_off;
g.n = yche_graph.nodemax;
g.m = yche_graph.edgemax;
string reorder_method(argv[2]);
vector<int32_t> new_vid_dict;
vector<int32_t> old_vid_dict;
ReorderWrapper(g, argv[1], reorder_method, new_vid_dict, old_vid_dict);
/************ Compute k - truss *****************************************/
//edge list array
Timer get_eid_timer;
auto edgeIdToEdge = (Edge *) malloc(sizeof(Edge) * (g.m / 2));
auto EdgeSupport = (int *) malloc(sizeof(int) * (g.m / 2));
log_info("Malloc Time: %.9lf s", get_eid_timer.elapsed());
auto org_num_edges = g.m / 2;
get_eid_timer.reset();
//Populate the edge list array
getEidAndEdgeList(&g, edgeIdToEdge);
log_info("Init Eid Time: %.9lf s", get_eid_timer.elapsed());
get_eid_timer.reset();
auto max_omp_threads = omp_get_max_threads();
log_info("Max Threads: %d", max_omp_threads);
#pragma omp parallel for
for (auto i = 0; i < max_omp_threads; i++) {
auto avg = g.m / 2 / max_omp_threads;
auto iter_beg = avg * i;
auto iter_end = (i == max_omp_threads - 1) ? g.m / 2 : avg * (i + 1);
memset(EdgeSupport + iter_beg, 0, (iter_end - iter_beg) * sizeof(int));
}
log_info("Init EdgeSupport Time: %.9lf s", get_eid_timer.elapsed());
get_eid_timer.reset();
Timer global_timer;
int *EdgeSupportCUDA, *output, *final_result_output;
ZLCudaMalloc(&output, sizeof(int) * org_num_edges, &mem_stat);
ZLCudaMalloc(&final_result_output, sizeof(int) * org_num_edges, &mem_stat);
ZLCudaMalloc(&EdgeSupportCUDA, sizeof(int) * (g.m / 2), &mem_stat);
log_info("Malloc Time: %.9lfs", global_timer.elapsed());
#ifndef GPU_ONLY
eid_t *level_start_pos, *edge_offsets_level, *edge_off_org;
int *edge_sup;
Edge *edge_lst;
auto level = TrussDecompositionLevelsCPU(g, EdgeSupport, edgeIdToEdge,
level_start_pos, edge_offsets_level, edge_off_org,
edge_sup, edge_lst);
Timer offload_timer;
cudaMemcpy(output, edge_offsets_level, sizeof(int) * org_num_edges, cudaMemcpyHostToDevice);
cudaMemcpy(EdgeSupportCUDA, edge_sup, sizeof(int) * g.m / 2, cudaMemcpyHostToDevice);
assert(level_start_pos[level + 1] == 0);
log_info("Current g.m: %'lld", g.m / 2);
PKT_cuda(&g, edge_off_org, EdgeSupportCUDA, edge_lst,
100, output, level_start_pos, &mem_stat, &time_stat, level);
cudaDeviceSynchronize();
log_info("Offloading Comp Time: %.9lfs", offload_timer.elapsed());
#else
auto level_start_pos = (eid_t *) calloc(MAX_LEVEL, sizeof(eid_t));
cudaMemcpy(EdgeSupportCUDA, EdgeSupport, sizeof(int) * g.m / 2, cudaMemcpyHostToDevice);
Timer tc_timer;
invoke_tc_bmp_gpu(&g, EdgeSupportCUDA);
extern double tc_time;
tc_time = tc_timer.elapsed();
PKT_cuda(&g, nullptr, EdgeSupportCUDA, edgeIdToEdge,
100, output, level_start_pos, &mem_stat, &time_stat, 0);
#endif
log_info("Parallel K-Truss: %.9lfs", global_timer.elapsed());
auto output_dir = string(argv[1]) + "/" + string("ktruss-") + algorithm_name + ".histogram";
log_info("Output Dir: %s", output_dir.c_str());
/*Recover the EdgeSupport for checking*/
Timer recovery_timer;
CheckLevelOff(level_start_pos);
#pragma omp parallel
for (int l = 0;; l++) {
auto start = level_start_pos[l];
auto end = level_start_pos[l + 1];
if (start == org_num_edges) break;
if (start > end) {
log_fatal("error in level stat pos: %d, [%d, %d)", l, start, end);
exit(-1);
}
#pragma omp for
for (int i = start; i < end; i++) {
final_result_output[output[i]] = l;
}
}
log_info("Finish Constructing Final Results..., Recovery Time: %.9lfs", recovery_timer.elapsed());
display_stats(final_result_output, org_num_edges, output_dir);
log_info("Finish Checking, ET: %.9lfs", recovery_timer.elapsed_and_reset());
//Free memory
free_graph(&g);
free(level_start_pos);
#ifndef GPU_ONLY
free(edge_offsets_level);
free(edge_off_org);
free(edge_sup);
free(edge_lst);
#endif
free(edgeIdToEdge);
free(EdgeSupport);
ZLCudaFree(output, &mem_stat);
ZLCudaFree(final_result_output, &mem_stat);
log_info("Free Time: %.9lfs", recovery_timer.elapsed());
#ifdef USE_LOG
if (log_f != nullptr) {
log_info("Flush File and Close...");
fflush(log_f);
fclose(log_f);
}
#endif
return 0;
}
|
0a5a812b6c8e64d52675cd8580f97f8a4dea71a0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* @file : cg_eg3.cu
* @brief : Examples of using cooperative groups
* @details : cooperative groups for CUDA examples
* Note; limitations on maximum values that can be reduced (summation) is due to 32-bit architecture of
* GeForce GTX 980 Ti that I'm using; please make a hardware donation (for a Titan V or GTX 1080 Ti) if you find this code useful!
* @author : Ernest Yeung <[email protected]>
* @date : 20170104
* @ref : https://devblogs.nvidia.com/parallelforall/cooperative-groups/
*
 * https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni&currency_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc cg_eg3.cu -o cg_eg3
*
* */
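/*
 * For this hipified copy, a presumably equivalent command (assuming a ROCm toolchain with
 * hipcc on the PATH; the file and output names here are illustrative only) would be:
 * hipcc cg_eg3.hip -o cg_eg3
 * */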
#include <hip/hip_cooperative_groups.h>
#include <iostream>
#include <algorithm> // std::fill_n
#include <memory> // std::unique_ptr
/* ********** functions to setup device GPU, test values ********** */
/** @fn get_maxGridSize
 * @brief get maxGridSize (total number of threads on a (thread) grid) of a single device GPU
 * */
size_t get_maxGridSize() {
hipDeviceProp_t prop;
int count;
hipGetDeviceCount(&count);
size_t MAXGRIDSIZE;
if (count>0) {
hipGetDeviceProperties(&prop, 0);
MAXGRIDSIZE = prop.maxGridSize[0];
return MAXGRIDSIZE;
} else { return EXIT_FAILURE; }
};
__global__ void inc_kernel(int *input,int inc, int L) {
unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;
for (unsigned int idx=k_x; idx < L; idx += blockDim.x*gridDim.x) {
input[idx] = ((int) idx + inc);
}
}
/* ********** END of functions to setup device GPU, test values ********** */
namespace cg = cooperative_groups;
/** @fn reduce_sum
* @details used to reduce (summation) on a single thread block in shared memory
* while not obvious from this function definition, in practical usage,
* val will be the partial sum that is at the index given by the global thread index
* threadIdx.x + blockDim.x * blockIdx.x;
* and so we'll have loaded all the various array values for this particular thread block into
* shared memory lane
* */
__device__ int reduce_sum(cg::thread_group g, int *temp, int val)
{
int lane = g.thread_rank();
// Each iteration halves the number of active threads
// Each thread adds to partial sum[i] its sum[lane+i]
for (int i = g.size() / 2; i >0; i/=2)
{
// load the array values with this thread block into temp
temp[lane] = val;
g.sync(); // wait for all threads to store
if (lane <i) {
val += temp[lane+i];
}
g.sync(); // wait for all threads to load
}
return val; // note: only thread 0 will return full sum
};
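/* Worked example (sizes chosen for illustration): with g.size() == 8 and every thread passing
 * val == 1, the loop runs i = 4, 2, 1; after i = 4 lanes 0-3 hold 2, after i = 2 lanes 0-1 hold 4,
 * and after i = 1 lane 0 holds 8 -- so only thread_rank() == 0 returns the full block sum,
 * which is why sum_kernel below lets only that thread perform the atomicAdd. */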
/** @fn thread_sum
 * @brief compute many partial sums in parallel, generalized to when n is not a power of 2
 * @details compute many partial sums in parallel, generalized to when n is not a power of 2,
* where each thread strides through the array computing a partial sum
* */
__device__ int thread_sum(int *input, int L)
{
int sum =0;
unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;
/* increment by blockDim.x*gridDim.x, so that a single thread will do all the
* "work" needed done on n, especially if n >= gridDim.x*blockDim.x = N_x*M_x */
for (int i=k_x;
i < L/4;
i += blockDim.x * gridDim.x)
{
int4 in = ((int4*) input)[i];
sum += in.x + in.y + in.z + in.w;
}
// process remaining elements
for (unsigned int idx= k_x + L/4*4; idx < L; idx += 4 ) {
sum += input[idx];
}
return sum;
};
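/* Worked example of the two loops above: for L == 513 (the (1<<9)+1 case exercised in main),
 * L/4 == 128, so the int4 loop covers indices 0..511 four at a time and the remainder loop
 * leaves only index 512, which is picked up by the thread with k_x == 0. */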
/** @fn sum_kernel
* @brief sum kernel, generalized for n not a power of 2 */
__global__ void sum_kernel(int *sum, int *input, int L)
{
// for a particular thread k_x, we've obtained the
// sum of input[k_x], input[k_x+1], ... input[k_x+3] in sum4
int sum4 = thread_sum(input, L);
extern __shared__ int temp[];
auto g = cg::this_thread_block();
int block_sum = reduce_sum(g,temp,sum4);
if (g.thread_rank() == 0) {
atomicAdd(sum, block_sum);
}
};
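/* Launch-side notes (as set up in main below): temp[] needs blockDim.x ints of dynamic shared
 * memory, i.e. sharedBytes = M_x * sizeof(int), and since thread_sum consumes four ints per
 * thread the grid size N_x is derived from L/4 rather than L. */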
int main(int argc, char* argv[])
{
size_t MAXGRIDSIZE = get_maxGridSize();
/* ***** (thread) grid,block dims ***** */
/* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
* determined here */
    unsigned int M_x = 1<<6; // M_x = number of threads in x-direction, in a single block, i.e. blocksize; 2^6 = 64
unsigned int L = 1<<7; // doesn't output correct values for n = 1<<30
unsigned int MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
// notice how we're only launching 1/4 of L threads
unsigned int N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
int sharedBytes = M_x * sizeof(int);
/* ***** END of (thread) grid,block dims ***** */
// setup input, output
auto del_ints_lambda=[&](int* ptr) { hipFree(ptr); };
std::unique_ptr<int,decltype(del_ints_lambda)> sum(nullptr,del_ints_lambda);
std::unique_ptr<int[],decltype(del_ints_lambda)> input(nullptr,del_ints_lambda);
hipMallocManaged((void**)&sum, sizeof(int)) ;
hipMallocManaged((void**)&input, L*sizeof(int));
std::fill_n(input.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int));
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input.get(),L);
/* sanity check */
// host output of sum
std::unique_ptr<int> h_sum = std::make_unique<int>( 0 );
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<7 : " << (1<<7) << std::endl;
/* ******************************************************* */
/* ********** more tests of \sum_{i=1}^L 1 = L ********** */
/* ***** L = 1<<8 = 2^8 = 256 test ***** */
L = 1<< 8;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input1(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input1,L*sizeof(int));
std::fill_n(input1.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int));
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input1.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<8 : " << (1<<8) << std::endl;
/* ***** L = 1<<9 + 1= 2^9 + 1= 513 test ***** */
L = (1<< 9)+1;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input2(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input2,L*sizeof(int));
std::fill_n(input2.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int));
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input2.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<9) + 1 : " << ((1<<9)+1) << std::endl;
/* ***** L = 1<<29 = 2^29 test ***** */
{
L = (1<< 29);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input3(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input3,L*sizeof(int));
std::fill_n(input3.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input3.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29) : " << (1<<29) << std::endl;
}
/* ***** L = (1<<29) + 2 = (2^29 + 2) test ***** */
{
L = (1<< 29)+2;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29)+2 : " << ((1<<29)+2) << std::endl;
}
/* ***** L = 1<<30 = 2^30 test ***** */
{
L = (1<< 30);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30) : " << (1<<30) << std::endl;
}
/* ***** L = 1<<30 +3 = 2^30+3 test ***** */
{
L = (1<< 30)+3;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30)+3 : " << ((1<<30)+3) << std::endl;
}
/* ********** END of more tests of \sum_{i=1}^L 1 = L ********** */
/* ************************************************************ */
/* ********** more tests of \sum_{i=1}^L i = L(L+1)/2 ********** */
/* ***** L = 1<<15 = 2^15 test ***** */
{
L = (1<< 15);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input5(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input5,L*sizeof(int));
hipLaunchKernelGGL(( inc_kernel), dim3(min((L+M_x-1)/M_x,MAX_BLOCKS)), dim3(M_x), 0, 0, input5.get(),1,L);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input5.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
}
/* ***** L = 1<<15 + 2 = 2^15 +2 test ***** */
{
L = (1<< 15) + 2;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input6(nullptr,del_ints_lambda);
hipMallocManaged((void**) &input6,L*sizeof(int));
hipLaunchKernelGGL(( inc_kernel), dim3(min((L+M_x-1)/M_x,MAX_BLOCKS)), dim3(M_x), 0, 0, input6.get(),1,L);
hipMemset(sum.get(), 0,sizeof(int)); // reset the sum
hipLaunchKernelGGL(( sum_kernel), dim3(N_x),dim3(M_x),sharedBytes, 0, sum.get(),input6.get(),L);
/* sanity check */
// host output of sum
hipMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), hipMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
}
}
| 0a5a812b6c8e64d52675cd8580f97f8a4dea71a0.cu | /**
* @file : cg_eg3.cu
* @brief : Examples of using cooperative groups
* @details : cooperative groups for CUDA examples
* Note; limitations on maximum values that can be reduced (summation) is due to 32-bit architecture of
* GeForce GTX 980 Ti that I'm using; please make a hardware donation (for a Titan V or GTX 1080 Ti) if you find this code useful!
* @author : Ernest Yeung <[email protected]>
* @date : 20170104
* @ref : https://devblogs.nvidia.com/parallelforall/cooperative-groups/
*
* https://www.paypal.com/cgi-bin/webscr?cmd=_donations&business=ernestsaveschristmas%2bpaypal%40gmail%2ecom&lc=US&item_name=ernestyalumni¤cy_code=USD&bn=PP%2dDonationsBF%3abtn_donateCC_LG%2egif%3aNonHosted
*
* which won't go through a 3rd. party such as indiegogo, kickstarter, patreon.
* Otherwise, I receive emails and messages on how all my (free) material on
* physics, math, and engineering have helped students with their studies,
* and I know what it's like to not have money as a student, but love physics
* (or math, sciences, etc.), so I am committed to keeping all my material
* open-source and free, whether or not
* sufficiently crowdfunded, under the open-source MIT license:
* feel free to copy, edit, paste, make your own versions, share, use as you wish.
* Just don't be an asshole and not give credit where credit is due.
* Peace out, never give up! -EY
*
* */
/*
* COMPILATION TIP
* nvcc cg_eg3.cu -o cg_eg3
*
* */
#include <cooperative_groups.h>
#include <iostream>
#include <algorithm> // std::fill_n
#include <memory> // std::unique_ptr
/* ********** functions to setup device GPU, test values ********** */
/** @fn get_maxGridSize
 * @brief get maxGridSize[0], the maximum number of thread blocks allowed in the x-direction
 * of a (thread) grid, for a single device GPU
 * */
size_t get_maxGridSize() {
cudaDeviceProp prop;
int count;
cudaGetDeviceCount(&count);
size_t MAXGRIDSIZE;
if (count>0) {
cudaGetDeviceProperties(&prop, 0);
MAXGRIDSIZE = prop.maxGridSize[0];
return MAXGRIDSIZE;
} else { return EXIT_FAILURE; }
};
__global__ void inc_kernel(int *input,int inc, int L) {
unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;
for (unsigned int idx=k_x; idx < L; idx += blockDim.x*gridDim.x) {
input[idx] = ((int) idx + inc);
}
}
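// note (illustrative): with inc = 1, inc_kernel fills input[idx] = idx + 1 for idx = 0..L-1,
// so the array holds 1,2,...,L and the reduction should return L*(L+1)/2, which is exactly
// what the last two tests in main() print for comparison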
/* ********** END of functions to setup device GPU, test values ********** */
namespace cg = cooperative_groups;
/** @fn reduce_sum
* @details used to reduce (summation) on a single thread block in shared memory
* while not obvious from this function definition, in practical usage,
* val will be the partial sum that is at the index given by the global thread index
* threadIdx.x + blockDim.x * blockIdx.x;
 * and so, after the store below, each lane of the shared-memory array temp holds the
 * partial sum belonging to one thread of this thread block
* */
__device__ int reduce_sum(cg::thread_group g, int *temp, int val)
{
int lane = g.thread_rank();
// Each iteration halves the number of active threads
// Each thread adds to partial sum[i] its sum[lane+i]
for (int i = g.size() / 2; i >0; i/=2)
{
// load the array values with this thread block into temp
temp[lane] = val;
g.sync(); // wait for all threads to store
if (lane <i) {
val += temp[lane+i];
}
g.sync(); // wait for all threads to load
}
return val; // note: only thread 0 will return full sum
};
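/* worked trace of reduce_sum (illustrative) for a block of g.size() = 8 lanes holding
 * val = {v0,...,v7}: after i = 4, lanes 0..3 hold vj + v(j+4); after i = 2, lanes 0..1 hold
 * sums of four values; after i = 1, lane 0 holds v0 + ... + v7. Only lane 0 returns the
 * complete block sum, which is why sum_kernel below only lets thread_rank() == 0 call atomicAdd */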
/** @fn thread_sum
 * @brief compute many partial sums in parallel, generalized to when L is not a power of 2
 * @details compute many partial sums in parallel, generalized to when L is not a power of 2,
 * where each thread strides through the array computing a partial sum
* */
__device__ int thread_sum(int *input, int L)
{
int sum =0;
unsigned int k_x = threadIdx.x + blockDim.x*blockIdx.x;
/* increment by blockDim.x*gridDim.x, so that a single thread strides over all of the
 * elements assigned to it, especially if L >= gridDim.x*blockDim.x = N_x*M_x */
for (int i=k_x;
i < L/4;
i += blockDim.x * gridDim.x)
{
int4 in = ((int4*) input)[i];
sum += in.x + in.y + in.z + in.w;
}
// process remaining elements
for (unsigned int idx= k_x + L/4*4; idx < L; idx += 4 ) {
sum += input[idx];
}
return sum;
};
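// note (illustrative): the int4 cast above reads 16 bytes per iteration, so input is assumed to
// be 16-byte aligned; that holds here because main() allocates it with cudaMallocManaged, which
// returns allocations aligned well beyond 16 bytes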
/** @fn sum_kernel
* @brief sum kernel, generalized for n not a power of 2 */
__global__ void sum_kernel(int *sum, int *input, int L)
{
// for a particular thread k_x, sum4 holds the partial sum of all the int4 chunks
// (plus any tail elements) that thread_sum strided over for this thread
int sum4 = thread_sum(input, L);
extern __shared__ int temp[];
auto g = cg::this_thread_block();
int block_sum = reduce_sum(g,temp,sum4);
if (g.thread_rank() == 0) {
atomicAdd(sum, block_sum);
}
};
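/* launch sketch (illustrative): sum_kernel needs blockDim.x * sizeof(int) bytes of dynamic
 * shared memory for temp, i.e. a launch of the form
 * sum_kernel<<<N_x, M_x, M_x * sizeof(int)>>>(sum, input, L);
 * which is exactly the sharedBytes value computed in main() below */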
int main(int argc, char* argv[])
{
size_t MAXGRIDSIZE = get_maxGridSize();
/* ***** (thread) grid,block dims ***** */
/* min of N_x, number of (thread) blocks on grid in x-direction, and MAX_BLOCKS allowed is
* determined here */
unsigned int M_x = 1<<6; // M_x = number of threads in x-direction, in a single block, i.e. blocksize; 2^6 = 64
unsigned int L = 1<<7; // doesn't output correct values for L = 1<<30
unsigned int MAX_BLOCKS = (MAXGRIDSIZE + M_x - 1)/ M_x;
// notice how we're only launching 1/4 of L threads
unsigned int N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
int sharedBytes = M_x * sizeof(int);
/* ***** END of (thread) grid,block dims ***** */
// setup input, output
auto del_ints_lambda=[&](int* ptr) { cudaFree(ptr); };
std::unique_ptr<int,decltype(del_ints_lambda)> sum(nullptr,del_ints_lambda);
std::unique_ptr<int[],decltype(del_ints_lambda)> input(nullptr,del_ints_lambda);
cudaMallocManaged((void**)&sum, sizeof(int)) ;
cudaMallocManaged((void**)&input, L*sizeof(int));
std::fill_n(input.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int));
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input.get(),L);
/* sanity check */
// host output of sum
std::unique_ptr<int> h_sum = std::make_unique<int>( 0 );
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<7 : " << (1<<7) << std::endl;
/* ******************************************************* */
/* ********** more tests of \sum_{i=1}^L 1 = L ********** */
/* ***** L = 1<<8 = 2^8 = 256 test ***** */
L = 1<< 8;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input1(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input1,L*sizeof(int));
std::fill_n(input1.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int));
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input1.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " 1<<8 : " << (1<<8) << std::endl;
/* ***** L = 1<<9 + 1= 2^9 + 1= 513 test ***** */
L = (1<< 9)+1;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input2(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input2,L*sizeof(int));
std::fill_n(input2.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int));
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input2.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<9) + 1 : " << ((1<<9)+1) << std::endl;
/* ***** L = 1<<29 = 2^29 test ***** */
{
L = (1<< 29);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input3(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input3,L*sizeof(int));
std::fill_n(input3.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input3.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29) : " << (1<<29) << std::endl;
}
/* ***** L = (1<<29) + 2 = (2^29 + 2) test ***** */
{
L = (1<< 29)+2;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<29)+2 : " << ((1<<29)+2) << std::endl;
}
/* ***** L = 1<<30 = 2^30 test ***** */
{
L = (1<< 30);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30) : " << (1<<30) << std::endl;
}
/* ***** L = 1<<30 +3 = 2^30+3 test ***** */
{
L = (1<< 30)+3;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input4(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input4,L*sizeof(int));
std::fill_n(input4.get(),L,1);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input4.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " (1<<30)+3 : " << ((1<<30)+3) << std::endl;
}
/* ********** END of more tests of \sum_{i=1}^L 1 = L ********** */
/* ************************************************************ */
/* ********** more tests of \sum_{i=1}^L i = L(L+1)/2 ********** */
/* ***** L = 1<<15 = 2^15 test ***** */
{
L = (1<< 15);
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input5(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input5,L*sizeof(int));
inc_kernel<<< min((L+M_x-1)/M_x,MAX_BLOCKS), M_x>>>(input5.get(),1,L);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input5.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
}
/* ***** L = 1<<15 + 2 = 2^15 +2 test ***** */
{
L = (1<< 15) + 2;
// notice how we're only launching 1/4 of L threads
N_x = min( MAX_BLOCKS, ((L/4 + M_x - 1)/ M_x));
std::unique_ptr<int[],decltype(del_ints_lambda)> input6(nullptr,del_ints_lambda);
cudaMallocManaged((void**) &input6,L*sizeof(int));
inc_kernel<<< min((L+M_x-1)/M_x,MAX_BLOCKS), M_x>>>(input6.get(),1,L);
cudaMemset(sum.get(), 0,sizeof(int)); // reset the sum
sum_kernel<<<N_x,M_x,sharedBytes>>>(sum.get(),input6.get(),L);
/* sanity check */
// host output of sum
cudaMemcpy( h_sum.get(), sum.get(), 1*sizeof(int), cudaMemcpyDeviceToHost);
std::cout << std::endl << " *h_sum : " << *h_sum << " L(L+1)/2 : " << (L*(L+1)/2) << std::endl;
}
}
|
65fdc36e543ecf6f0bcc9e05919b26074bb08353.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2015 by Contributors
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./native_op-inl.h"
#include "./proposal-inl.h"
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of hipError_t error */ \
do { \
hipError_t error = condition; \
CHECK_EQ(error, hipSuccess) << " " << hipGetErrorString(error); \
} while (0)
namespace mshadow{
namespace cuda{
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
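// index decomposition example (illustrative): with num_anchors = 9 and width = W, a flat index
// maps to a = index % 9, w = (index / 9) % W, h = index / (9 * W); anchors vary fastest, then
// columns, then rows, matching the (h * w * anchor, 5) layout documented above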
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
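// worked example (illustrative): for a = {0,0,9,9} and b = {5,5,14,14} the intersection is
// 5 x 5 = 25, the areas are Sa = Sb = 10 x 10 = 100, so devIoU returns 25 / (100 + 100 - 25),
// roughly 0.143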
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int threadsPerBlock = sizeof(unsigned long long) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
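// mask layout (illustrative): boxes are tiled into 64-wide groups (threadsPerBlock = 64) and
// dev_mask[i * col_blocks + c] is a 64-bit bitmap whose bit k is set when box i overlaps box
// (c * 64 + k) above nms_overlap_thresh; the host-side loop in _nms() below walks these bitmaps
// greedily to decide which boxes to keep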
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(unsigned long long) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(hipMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
hipLaunchKernelGGL(( nms_kernel), dim3(blocks), dim3(threads), 0, 0, boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(hipMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
hipMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(hipFree(mask_dev));
}
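// usage sketch (illustrative): given dets as a device Tensor<gpu, 2> of shape (n, 5) already
// sorted by score, something like
//   std::vector<int> keep(n); int num_kept = 0;
//   _nms(dets, 0.7f, &keep[0], &num_kept);
// leaves the indices of the surviving boxes in keep[0..num_kept-1]; the proposal operator below
// calls it this way with param_.threshold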
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
}
}
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1) << "Sorry, multiple images each device is not implemented.";
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
real_t* foreground_score_ptr = reinterpret_cast<real_t *>(in_data[proposal::kClsProb].dptr_) + fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count; // set to -1 for max
rpn_pre_nms_top_n = ::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = ::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
FRCNN_CUDA_CHECK(hipMemcpy(workspace_proposals.dptr_, &anchors[0], sizeof(float) * anchors.size(),
hipMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
hipLaunchKernelGGL(( ProposalGridKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(hipMemcpy(&cpu_im_info[0], im_info.dptr_, sizeof(float) * cpu_im_info.size(), hipMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
hipLaunchKernelGGL(( IoUPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
hipLaunchKernelGGL(( BBoxPredKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
hipLaunchKernelGGL(( FilterBoxKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Copy score to a continuous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
hipLaunchKernelGGL(( CopyScoreKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(hipMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr, Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
hipLaunchKernelGGL(( ReorderProposalsKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_pre_nms_top_n, workspace_proposals.dptr_, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
FRCNN_CUDA_CHECK(hipFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(hipFree(score_ptr));
FRCNN_CUDA_CHECK(hipFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(hipMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(hipMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(), hipMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
hipLaunchKernelGGL(( PrepareOutput), dim3(dimGrid), dim3(dimBlock), 0, 0,
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size,
out.dptr_, out_score.dptr_);
FRCNN_CUDA_CHECK(hipPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(hipFree(keep));
FRCNN_CUDA_CHECK(hipFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
| 65fdc36e543ecf6f0bcc9e05919b26074bb08353.cu | /*!
* Copyright (c) 2015 by Contributors
* \file proposal.cu
* \brief Proposal Operator
* \author Shaoqing Ren, Jian Guo
*/
#include <dmlc/logging.h>
#include <dmlc/parameter.h>
#include <mxnet/operator.h>
#include <map>
#include <vector>
#include <string>
#include <utility>
#include <ctime>
#include <iostream>
#include <mshadow/tensor.h>
#include <mshadow/cuda/reduce.cuh>
#include <thrust/sort.h>
#include <thrust/execution_policy.h>
#include <thrust/functional.h>
#include "./operator_common.h"
#include "./mshadow_op.h"
#include "./native_op-inl.h"
#include "./proposal-inl.h"
#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0))
#define FRCNN_CUDA_CHECK(condition) \
/* Code block avoids redefinition of cudaError_t error */ \
do { \
cudaError_t error = condition; \
CHECK_EQ(error, cudaSuccess) << " " << cudaGetErrorString(error); \
} while (0)
namespace mshadow{
namespace cuda{
// scores are (b, anchor, h, w)
// workspace_proposals are (h * w * anchor, 5)
// w defines "x" and h defines "y"
// count should be total anchors numbers, h * w * anchors
template<typename Dtype>
__global__ void ProposalGridKernel(const int count,
const int num_anchors,
const int height,
const int width,
const int feature_stride,
const Dtype* scores,
Dtype* workspace_proposals) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % width;
int h = index / num_anchors / width;
workspace_proposals[index * 5 + 0] = workspace_proposals[a * 5 + 0] + w * feature_stride;
workspace_proposals[index * 5 + 1] = workspace_proposals[a * 5 + 1] + h * feature_stride;
workspace_proposals[index * 5 + 2] = workspace_proposals[a * 5 + 2] + w * feature_stride;
workspace_proposals[index * 5 + 3] = workspace_proposals[a * 5 + 3] + h * feature_stride;
workspace_proposals[index * 5 + 4] = scores[(a * height + h) * width + w];
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void BBoxPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float width = boxes[index * 5 + 2] - boxes[index * 5 + 0] + 1.0f;
float height = boxes[index * 5 + 3] - boxes[index * 5 + 1] + 1.0f;
float ctr_x = boxes[index * 5 + 0] + 0.5f * (width - 1.0f);
float ctr_y = boxes[index * 5 + 1] + 0.5f * (height - 1.0f);
float dx = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dw = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dh = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_ctr_x = dx * width + ctr_x;
float pred_ctr_y = dy * height + ctr_y;
float pred_w = exp(dw) * width;
float pred_h = exp(dh) * height;
float pred_x1 = pred_ctr_x - 0.5f * (pred_w - 1.0f);
float pred_y1 = pred_ctr_y - 0.5f * (pred_h - 1.0f);
float pred_x2 = pred_ctr_x + 0.5f * (pred_w - 1.0f);
float pred_y2 = pred_ctr_y + 0.5f * (pred_h - 1.0f);
pred_x1 = max(min(pred_x1, im_width - 1.0f), 0.0f);
pred_y1 = max(min(pred_y1, im_height - 1.0f), 0.0f);
pred_x2 = max(min(pred_x2, im_width - 1.0f), 0.0f);
pred_y2 = max(min(pred_y2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// boxes are (h * w * anchor, 5)
// deltas are (b, 4 * anchor, h, w)
// out_pred_boxes are (h * w * anchor, 5)
// count should be total anchors numbers, h * w * anchors
// in-place write: boxes and out_pred_boxes are the same location
template<typename Dtype>
__global__ void IoUPredKernel(const int count,
const int num_anchors,
const int feat_height,
const int feat_width,
const int real_height,
const int real_width,
const float im_height,
const float im_width,
const Dtype* boxes,
const Dtype* deltas,
Dtype* out_pred_boxes) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
int a = index % num_anchors;
int w = (index / num_anchors) % feat_width;
int h = index / num_anchors / feat_width;
float x1 = boxes[index * 5 + 0];
float y1 = boxes[index * 5 + 1];
float x2 = boxes[index * 5 + 2];
float y2 = boxes[index * 5 + 3];
float dx1 = deltas[((a * 4) * feat_height + h) * feat_width + w];
float dy1 = deltas[((a * 4 + 1) * feat_height + h) * feat_width + w];
float dx2 = deltas[((a * 4 + 2) * feat_height + h) * feat_width + w];
float dy2 = deltas[((a * 4 + 3) * feat_height + h) * feat_width + w];
float pred_x1 = max(min(x1 + dx1, im_width - 1.0f), 0.0f);
float pred_y1 = max(min(y1 + dy1, im_height - 1.0f), 0.0f);
float pred_x2 = max(min(x2 + dx2, im_width - 1.0f), 0.0f);
float pred_y2 = max(min(y2 + dy2, im_height - 1.0f), 0.0f);
out_pred_boxes[index * 5 + 0] = pred_x1;
out_pred_boxes[index * 5 + 1] = pred_y1;
out_pred_boxes[index * 5 + 2] = pred_x2;
out_pred_boxes[index * 5 + 3] = pred_y2;
if (h >= real_height || w >= real_width) {
out_pred_boxes[index * 5 + 4] = -1.0f;
}
}
}
// filter box with stride less than rpn_min_size
// filter: set score to zero
// dets (n, 5)
template<typename Dtype>
__global__ void FilterBoxKernel(const int count,
const float min_size,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
float iw = dets[index * 5 + 2] - dets[index * 5 + 0] + 1.0f;
float ih = dets[index * 5 + 3] - dets[index * 5 + 1] + 1.0f;
if (iw < min_size || ih < min_size) {
dets[index * 5 + 0] -= min_size / 2;
dets[index * 5 + 1] -= min_size / 2;
dets[index * 5 + 2] += min_size / 2;
dets[index * 5 + 3] += min_size / 2;
dets[index * 5 + 4] = -1.0f;
}
}
}
// copy score and init order
// dets (n, 5); score (n, ); order (n, )
// count should be n (total anchors or proposals)
template<typename Dtype>
__global__ void CopyScoreKernel(const int count,
const Dtype* dets,
Dtype* score,
int* order) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
score[index] = dets[index * 5 + 4];
order[index] = index;
}
}
// reorder proposals according to order and keep the top_n proposals
// prev_dets (n, 5); order (n, ); dets (n, 5)
// count should be output anchor numbers (top_n)
template<typename Dtype>
__global__ void ReorderProposalsKernel(const int count,
const Dtype* prev_dets,
const int* order,
Dtype* dets) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
const int order_i = order[index];
for (int j = 0; j < 5; j ++) {
dets[index * 5 + j] = prev_dets[order_i * 5 + j];
}
}
}
__device__ inline float devIoU(float const * const a, float const * const b) {
float left = max(a[0], b[0]), right = min(a[2], b[2]);
float top = max(a[1], b[1]), bottom = min(a[3], b[3]);
float width = max(right - left + 1, 0.f), height = max(bottom - top + 1, 0.f);
float interS = width * height;
float Sa = (a[2] - a[0] + 1) * (a[3] - a[1] + 1);
float Sb = (b[2] - b[0] + 1) * (b[3] - b[1] + 1);
return interS / (Sa + Sb - interS);
}
__global__ void nms_kernel(const int n_boxes, const float nms_overlap_thresh,
const float *dev_boxes, unsigned long long *dev_mask) {
const int threadsPerBlock = sizeof(unsigned long long) * 8;
const int row_start = blockIdx.y;
const int col_start = blockIdx.x;
// if (row_start > col_start) return;
const int row_size =
min(n_boxes - row_start * threadsPerBlock, threadsPerBlock);
const int col_size =
min(n_boxes - col_start * threadsPerBlock, threadsPerBlock);
__shared__ float block_boxes[threadsPerBlock * 5];
if (threadIdx.x < col_size) {
block_boxes[threadIdx.x * 5 + 0] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 0];
block_boxes[threadIdx.x * 5 + 1] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 1];
block_boxes[threadIdx.x * 5 + 2] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 2];
block_boxes[threadIdx.x * 5 + 3] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 3];
block_boxes[threadIdx.x * 5 + 4] =
dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 5 + 4];
}
__syncthreads();
if (threadIdx.x < row_size) {
const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x;
const float *cur_box = dev_boxes + cur_box_idx * 5;
int i = 0;
unsigned long long t = 0;
int start = 0;
if (row_start == col_start) {
start = threadIdx.x + 1;
}
for (i = start; i < col_size; i++) {
if (devIoU(cur_box, block_boxes + i * 5) > nms_overlap_thresh) {
t |= 1ULL << i;
}
}
const int col_blocks = DIVUP(n_boxes, threadsPerBlock);
dev_mask[cur_box_idx * col_blocks + col_start] = t;
}
}
void _nms(const mshadow::Tensor<gpu, 2>& boxes,
const float nms_overlap_thresh,
int *keep,
int *num_out) {
const int threadsPerBlock = sizeof(unsigned long long) * 8;
const int boxes_num = boxes.size(0);
const int boxes_dim = boxes.size(1);
float* boxes_dev = boxes.dptr_;
unsigned long long* mask_dev = NULL;
const int col_blocks = DIVUP(boxes_num, threadsPerBlock);
FRCNN_CUDA_CHECK(cudaMalloc(&mask_dev,
boxes_num * col_blocks * sizeof(unsigned long long)));
dim3 blocks(DIVUP(boxes_num, threadsPerBlock),
DIVUP(boxes_num, threadsPerBlock));
dim3 threads(threadsPerBlock);
nms_kernel<<<blocks, threads>>>(boxes_num,
nms_overlap_thresh,
boxes_dev,
mask_dev);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
std::vector<unsigned long long> mask_host(boxes_num * col_blocks);
FRCNN_CUDA_CHECK(cudaMemcpy(&mask_host[0],
mask_dev,
sizeof(unsigned long long) * boxes_num * col_blocks,
cudaMemcpyDeviceToHost));
std::vector<unsigned long long> remv(col_blocks);
memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks);
int num_to_keep = 0;
for (int i = 0; i < boxes_num; i++) {
int nblock = i / threadsPerBlock;
int inblock = i % threadsPerBlock;
if (!(remv[nblock] & (1ULL << inblock))) {
keep[num_to_keep++] = i;
unsigned long long *p = &mask_host[0] + i * col_blocks;
for (int j = nblock; j < col_blocks; j++) {
remv[j] |= p[j];
}
}
}
*num_out = num_to_keep;
FRCNN_CUDA_CHECK(cudaFree(mask_dev));
}
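// memory note (illustrative): the bitmask above occupies n_boxes * ceil(n_boxes / 64) * 8 bytes,
// e.g. roughly 4.5 MB when rpn_pre_nms_top_n is 6000, which is why it is freed as soon as the
// host-side greedy pass has consumed it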
// copy proposals to output
// dets (top_n, 5); keep (top_n, ); out (top_n, )
// count should be top_n (total anchors or proposals)
template<typename Dtype>
__global__ void PrepareOutput(const int count,
const Dtype* dets,
const int* keep,
const int out_size,
Dtype* out,
Dtype* score) {
for (int index = blockIdx.x * blockDim.x + threadIdx.x;
index < count;
index += blockDim.x * gridDim.x) {
out[index * 5] = 0;
if (index < out_size) {
int keep_i = keep[index];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
} else {
int keep_i = keep[index % out_size];
for (int j = 0; j < 4; ++j) {
out[index * 5 + j + 1] = dets[keep_i * 5 + j];
}
score[index] = dets[keep_i * 5 + 4];
}
}
}
}
}
namespace mxnet {
namespace op {
template<typename xpu>
class ProposalGPUOp : public Operator{
public:
explicit ProposalGPUOp(ProposalParam param) {
this->param_ = param;
}
virtual void Forward(const OpContext &ctx,
const std::vector<TBlob> &in_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &out_data,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
using namespace mshadow::cuda;
CHECK_EQ(in_data.size(), 3);
CHECK_EQ(out_data.size(), 2);
CHECK_GT(req.size(), 1);
CHECK_EQ(req[proposal::kOut], kWriteTo);
CHECK_EQ(in_data[proposal::kClsProb].shape_[0], 1) << "Sorry, multiple images each device is not implemented.";
Stream<xpu> *s = ctx.get_stream<xpu>();
Shape<4> fg_scores_shape = Shape4(in_data[proposal::kClsProb].shape_[0],
in_data[proposal::kClsProb].shape_[1] / 2,
in_data[proposal::kClsProb].shape_[2],
in_data[proposal::kClsProb].shape_[3]);
real_t* foreground_score_ptr = reinterpret_cast<real_t *>(in_data[proposal::kClsProb].dptr_) + fg_scores_shape.Size();
Tensor<xpu, 4> scores = Tensor<xpu, 4>(foreground_score_ptr, fg_scores_shape);
Tensor<xpu, 4> bbox_deltas = in_data[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> im_info = in_data[proposal::kImInfo].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out = out_data[proposal::kOut].get<xpu, 2, real_t>(s);
Tensor<xpu, 2> out_score = out_data[proposal::kScore].get<xpu, 2, real_t>(s);
int num_anchors = in_data[proposal::kClsProb].shape_[1] / 2;
int height = scores.size(2);
int width = scores.size(3);
int count = num_anchors * height * width; // count of total anchors
int rpn_pre_nms_top_n = (param_.rpn_pre_nms_top_n > 0) ? param_.rpn_pre_nms_top_n : count; // set to -1 for max
rpn_pre_nms_top_n = std::min(rpn_pre_nms_top_n, count);
int rpn_post_nms_top_n = std::min(param_.rpn_post_nms_top_n, rpn_pre_nms_top_n);
// Generate first anchors based on base anchor
std::vector<float> base_anchor(4);
base_anchor[0] = 0.0;
base_anchor[1] = 0.0;
base_anchor[2] = param_.feature_stride - 1.0;
base_anchor[3] = param_.feature_stride - 1.0;
CHECK_EQ(num_anchors, param_.ratios.info.size() * param_.scales.info.size());
std::vector<float> anchors;
utils::GenerateAnchors(base_anchor,
param_.ratios.info,
param_.scales.info,
anchors);
// Copy generated anchors to GPU
float* workspace_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_proposals_ptr, sizeof(float) * count * 5));
Tensor<xpu, 2> workspace_proposals(workspace_proposals_ptr, Shape2(count, 5));
FRCNN_CUDA_CHECK(cudaMemcpy(workspace_proposals.dptr_, &anchors[0], sizeof(float) * anchors.size(),
cudaMemcpyHostToDevice));
// Copy proposals to a mesh grid
dim3 dimGrid((count + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock);
dim3 dimBlock(kMaxThreadsPerBlock);
CheckLaunchParam(dimGrid, dimBlock, "ProposalGrid");
ProposalGridKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, param_.feature_stride,
scores.dptr_, workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// im_info is small, we want to copy them to cpu
std::vector<float> cpu_im_info(3);
FRCNN_CUDA_CHECK(cudaMemcpy(&cpu_im_info[0], im_info.dptr_, sizeof(float) * cpu_im_info.size(), cudaMemcpyDeviceToHost));
// prevent padded predictions
int real_height = static_cast<int>(cpu_im_info[0] / param_.feature_stride);
int real_width = static_cast<int>(cpu_im_info[1] / param_.feature_stride);
CHECK_GE(height, real_height) << height << " " << real_height << std::endl;
CHECK_GE(width, real_width) << width << " " << real_width << std::endl;
// Transform anchors and bbox_deltas into bboxes
CheckLaunchParam(dimGrid, dimBlock, "BBoxPred");
if (param_.iou_loss) {
IoUPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
} else {
BBoxPredKernel<<<dimGrid, dimBlock>>>(
count, num_anchors, height, width, real_height, real_width,
cpu_im_info[0], cpu_im_info[1],
workspace_proposals.dptr_, bbox_deltas.dptr_, workspace_proposals.dptr_);
}
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// filter boxes with less than rpn_min_size
CheckLaunchParam(dimGrid, dimBlock, "FilterBox");
FilterBoxKernel<<<dimGrid, dimBlock>>>(
count, param_.rpn_min_size * cpu_im_info[2], workspace_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Copy score to a continuous memory
float* score_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&score_ptr, sizeof(float) * count));
Tensor<xpu, 1> score(score_ptr, Shape1(count));
int* order_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&order_ptr, sizeof(int) * count));
Tensor<xpu, 1, int> order(order_ptr, Shape1(count));
CheckLaunchParam(dimGrid, dimBlock, "CopyScore");
CopyScoreKernel<<<dimGrid, dimBlock>>>(
count, workspace_proposals.dptr_, score.dptr_, order.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// argsort score, save order
thrust::stable_sort_by_key(thrust::device,
score.dptr_,
score.dptr_ + score.size(0),
order.dptr_,
thrust::greater<real_t>());
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// Reorder proposals according to order
float* workspace_ordered_proposals_ptr = NULL;
FRCNN_CUDA_CHECK(cudaMalloc(&workspace_ordered_proposals_ptr, sizeof(float) * rpn_pre_nms_top_n * 5));
Tensor<xpu, 2> workspace_ordered_proposals(workspace_ordered_proposals_ptr, Shape2(rpn_pre_nms_top_n, 5));
dimGrid.x = (rpn_pre_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "ReorderProposals");
ReorderProposalsKernel<<<dimGrid, dimBlock>>>(
rpn_pre_nms_top_n, workspace_proposals.dptr_, order.dptr_, workspace_ordered_proposals.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
FRCNN_CUDA_CHECK(cudaFree(workspace_proposals_ptr));
FRCNN_CUDA_CHECK(cudaFree(score_ptr));
FRCNN_CUDA_CHECK(cudaFree(order_ptr));
// perform nms
std::vector<int> _keep(workspace_ordered_proposals.size(0));
int out_size = 0;
_nms(workspace_ordered_proposals,
param_.threshold,
&_keep[0],
&out_size);
// copy nms result to gpu
int* keep;
FRCNN_CUDA_CHECK(cudaMalloc(&keep, sizeof(int) * _keep.size()));
FRCNN_CUDA_CHECK(cudaMemcpy(keep, &_keep[0], sizeof(int) * _keep.size(), cudaMemcpyHostToDevice));
// copy results after nms
dimGrid.x = (rpn_post_nms_top_n + kMaxThreadsPerBlock - 1) / kMaxThreadsPerBlock;
CheckLaunchParam(dimGrid, dimBlock, "PrepareOutput");
PrepareOutput<<<dimGrid, dimBlock>>>(
rpn_post_nms_top_n, workspace_ordered_proposals.dptr_, keep, out_size,
out.dptr_, out_score.dptr_);
FRCNN_CUDA_CHECK(cudaPeekAtLastError());
// free temporary memory
FRCNN_CUDA_CHECK(cudaFree(keep));
FRCNN_CUDA_CHECK(cudaFree(workspace_ordered_proposals_ptr));
}
virtual void Backward(const OpContext &ctx,
const std::vector<TBlob> &out_grad,
const std::vector<TBlob> &in_data,
const std::vector<TBlob> &out_data,
const std::vector<OpReqType> &req,
const std::vector<TBlob> &in_grad,
const std::vector<TBlob> &aux_states) {
using namespace mshadow;
using namespace mshadow::expr;
CHECK_EQ(in_grad.size(), 3);
Stream<xpu> *s = ctx.get_stream<xpu>();
Tensor<xpu, 4> gscores = in_grad[proposal::kClsProb].get<xpu, 4, real_t>(s);
Tensor<xpu, 4> gbbox = in_grad[proposal::kBBoxPred].get<xpu, 4, real_t>(s);
Tensor<xpu, 2> ginfo = in_grad[proposal::kImInfo].get<xpu, 2, real_t>(s);
// can not assume the grad would be zero
Assign(gscores, req[proposal::kClsProb], 0);
Assign(gbbox, req[proposal::kBBoxPred], 0);
Assign(ginfo, req[proposal::kImInfo], 0);
}
private:
ProposalParam param_;
}; // class ProposalGPUOp
template<>
Operator* CreateOp<gpu>(ProposalParam param) {
return new ProposalGPUOp<gpu>(param);
}
} // namespace op
} // namespace mxnet
|
a45f1bd43693c41af16f433a5df519b10e9bd453.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
using namespace std;
using HistType = uint32_t;
enum class Mode {
CPU,
OMP,
OMP_NOATOMIC,
CUDA,
CUDA_NOATOMIC,
CUDA_SHARED,
};
enum class AtomicTypeCuda {
NONE,
STANDARD,
SHARED,
};
__global__ void _computeHistogramCudaNoAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) {
size_t stride = blockDim.x * gridDim.x;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length;
i += stride) {
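// NOTE (illustrative): this unguarded read-modify-write races between threads that see the same
// byte value, so the resulting counts are not exact; the NoAtomic variant presumably exists for
// timing comparison against the atomic versions listed in Mode above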
histogram[bytes[i]]++;
}
} | a45f1bd43693c41af16f433a5df519b10e9bd453.cu | #include "includes.h"
using namespace std;
using HistType = uint32_t;
enum class Mode {
CPU,
OMP,
OMP_NOATOMIC,
CUDA,
CUDA_NOATOMIC,
CUDA_SHARED,
};
enum class AtomicTypeCuda {
NONE,
STANDARD,
SHARED,
};
__global__ void _computeHistogramCudaNoAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) {
size_t stride = blockDim.x * gridDim.x;
for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length;
i += stride) {
histogram[bytes[i]]++;
}
} |
e268c82c7b9e3db48a5a7d472f6d64bdbade9295.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Copyright (c) 2009,2010, Volodymyr Mnih
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <hipblas.h>
#include <math.h>
#include "rnd_multipliers_32bit.h"
#include "cudamat_kernels.cuh"
#ifdef __cplusplus
extern "C" {
#endif
#include "cudamat.cuh"
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = hipblasGetError();
return status != HIPBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
hipError_t err = hipGetLastError();
if (hipSuccess != err)
printf("%s\n", hipGetErrorString( err));
return hipSuccess != err;
}
const char* get_last_cuda_error() {
hipError_t err = hipGetLastError();
return hipGetErrorString( err);
}
int cublas_init() {
hipblasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int cublas_shutdown() {
hipblasShutdown();
hipDeviceReset();
return 0;
}
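/* Illustrative usage sketch (not part of the original cudamat API): the minimal
   host-side lifecycle for the wrappers defined in this file. It assumes device 0
   exists and that cuda_set_device() is declared in cudamat.cuh; the error codes
   come from that header as well. */
static int example_lifecycle() {
  if (cuda_set_device(0) != 0)      // pick a GPU before allocating anything
    return CUDA_ERROR;
  if (cublas_init() != 0)           // set up the BLAS layer used by dot() and friends
    return CUBLAS_ERROR;
  // ... allocate matrices and launch kernels here ...
  return cublas_shutdown();         // tears down BLAS and resets the device
}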
int cuda_record_event(hipEvent_t* t) {
hipError_t err = hipEventRecord(*t, 0);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_synchronize_event(hipEvent_t* t) {
//hipError_t err = hipEventSynchronize(*t);
hipError_t err = hipStreamWaitEvent(NULL, *t, 0);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_create_event(hipEvent_t* t) {
//hipError_t err = hipEventCreateWithFlags(t, hipEventBlockingSync);
hipError_t err = hipEventCreate(t);
if (hipSuccess != err) {
printf("%s\n", hipGetErrorString( err));
}
return hipSuccess != err;
}
int cuda_set_device(int deviceId) {
hipSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
bool cuda_is_fermi(int deviceId) {
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, deviceId);
return prop.major >= 2;
}
int cuda_set_P2P(int gpu1, int gpu2) {
bool is_fermi = cuda_is_fermi(gpu1) && cuda_is_fermi(gpu2);
int access2from1, access1from2;
hipDeviceCanAccessPeer(&access2from1, gpu1, gpu2);
hipDeviceCanAccessPeer(&access1from2, gpu2, gpu1);
//printf("%d can access %d : %d\n ", gpu1, gpu2, access2from1);
//printf("%d can access %d : %d\n ", gpu2, gpu1, access1from2);
bool same_complex = false;
if(access2from1==1 && access1from2==1) same_complex = true;
if(is_fermi && same_complex) {
hipSetDevice(gpu1);
hipDeviceEnablePeerAccess(gpu2, 0); //second argument is flags
hipSetDevice(gpu2);
hipDeviceEnablePeerAccess(gpu1, 0); //second argument is flags
return 0;
} else {
return CUDA_ERROR;
}
}
int destroy_tex(cudamat* mat) {
if (mat->tex_obj != 0) {
hipError_t err = hipDestroyTextureObject(mat->tex_obj);
    if (hipSuccess == err) {
mat->tex_obj = 0;
return 0;
} else {
return CUDA_ERROR;
}
}
return 0;
}
int init_random(rnd_struct* rnd_state, int seed) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
for (int i = 0; i < NUM_RND_STREAMS; i++) {
host_mults[i] = _rand_words[i];
}
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
hipblasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
hipblasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
free(host_mults);
hipDeviceSynchronize();
hipLaunchKernelGGL(( kSeedRandom), dim3(NUM_RND_BLOCKS), dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, seed);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
// Allocates and gives up ownership of pointer. Caller must free.
int get_rnd_state(rnd_struct* rnd_state, unsigned long long* host_words_out, int *size_out) {
*size_out = NUM_RND_STREAMS;
  host_words_out = (unsigned long long*)malloc(NUM_RND_STREAMS * sizeof(unsigned long long));
if (host_words_out == NULL) {
return ERROR_GENERIC; // Out of memory.
}
hipblasGetVector(NUM_RND_STREAMS, sizeof(unsigned long long), rnd_state->dev_words, 1, host_words_out, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
void cuda_sync_threads() {
hipDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
int allocate_device_memory(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = hipblasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_bbox(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
cublasStatus stat;
stat = hipblasAlloc(size, sizeof(int), (void**)&mat->data_device.seg);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(numboxes, sizeof(int), (void**)&mat->data_device.labels);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(4 * numboxes, sizeof(int), (void**)&mat->data_device.boxes);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = hipblasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = hipblasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int copy_to_host_slice(cudamat* mat, size_t start, size_t end) {
if (start >= end || end > mat->size[1])
return ERROR_GENERIC;
size_t len = mat->size[0] * (end - start);
size_t offset = mat->size[0] * start;
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device + offset, 1, mat->data_host + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_to_host(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
if (mat->on_device) {
hipblasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_bbox_to_host(cudamat_bbox* mat) {
if (mat->on_device) {
hipblasGetVector(mat->size, sizeof(int), mat->data_device.seg, 1, mat->data_host.seg, 1);
hipblasGetVector(mat->numboxes, sizeof(int), mat->data_device.labels, 1, mat->data_host.labels, 1);
hipblasGetVector(4 * mat->numboxes, sizeof(int), mat->data_device.boxes, 1, mat->data_host.boxes, 1);
if (check_cublas_error()) return CUBLAS_ERROR;
} else {
return ERROR_NOT_ON_DEVICE;
}
return 0;
}
int copy_to_device_slice(cudamat* mat, size_t start, size_t end) {
if (end <= start || end > mat->size[1])
return ERROR_GENERIC;
size_t len = mat->size[0] * (end - start);
int err_code = 0;
size_t offset = mat->size[0] * start;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host + offset, 1, mat->data_device + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_to_device(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_bbox_to_device(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_bbox(mat);
if (err_code)
return err_code;
}
hipblasSetVector(size, sizeof(int), mat->data_host.seg, 1, mat->data_device.seg, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(numboxes, sizeof(int), mat->data_host.labels, 1, mat->data_device.labels, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(4 * numboxes, sizeof(int), mat->data_host.boxes, 1, mat->data_device.boxes, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
hipblasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
hipblasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
// mat1: source, mat2: destination
int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipMemcpy(mat2->data_device, mat1->data_device, len * sizeof(float), hipMemcpyDefault);
//hipblasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
  if (checkCUDAError())
    return CUDA_ERROR;
  else
    return 0;
}
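/* Illustrative usage sketch (not part of the original cudamat API): copy one
   device matrix into a freshly allocated one of the same shape. Assumes the
   source matrix was set up through the routines in this file and is already on
   the device. */
static int example_clone_on_device(cudamat* src) {
  cudamat dst;
  int err = init_empty(&dst, src->size[0], src->size[1]);  // allocate device memory for the copy
  if (err != 0)
    return err;
  err = copy_on_device(src, &dst);                         // device-to-device memcpy
  free_device_memory(&dst);                                // release the temporary copy
  return err;
}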
int copy_on_device_p2p_async(cudamat* src, cudamat* dst, int src_dev, int dst_dev) {
int len = src->size[0]*src->size[1];
if (src->size[0] != dst->size[0] || src->size[1] != dst->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipMemcpyPeerAsync(dst->data_device, dst_dev, src->data_device, src_dev, len * sizeof(float));
  if (checkCUDAError())
    return CUDA_ERROR;
  else
    return 0;
}
int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kGetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
hipLaunchKernelGGL(( kSetRowSlice), dim3(kernelBlockGrid),dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_transpose_big_matrix(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kTransposeBig), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kTranspose), dim3(grid), dim3(threads) , 0, 0, target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int free_device_memory_bbox(cudamat_bbox* mat) {
if (mat->on_device) {
cublasStatus stat;
stat = hipblasFree(mat->data_device.seg);
stat = hipblasFree(mat->data_device.labels);
stat = hipblasFree(mat->data_device.boxes);
mat->on_device = 0;
if (stat != HIPBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int set_shape4d(Shape4D* shape, unsigned int s1, unsigned int s2, unsigned s3, unsigned s4) {
shape->shape[0] = s1;
shape->shape[1] = s2;
shape->shape[2] = s3;
shape->shape[3] = s4;
return 0;
}
int reshape(cudamat* mat, int m, int n) {
if (m < 0 && n < 0)
return ERROR_GENERIC;
if (m < 0)
m = (mat->size[0] * mat->size[1]) / n;
if (n < 0)
n = (mat->size[0] * mat->size[1]) / m;
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = source->data_host + first_col * num_rows;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
target->tex_obj = 0;
return 0;
}
int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->tex_obj = 0;
}
void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
mat->tex_obj = 0;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomUniform), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
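/* Illustrative usage sketch (not part of the original cudamat API): seed the
   RNG streams and fill a device matrix with uniform samples. Assumes host_buf
   points to rows*cols floats owned by the caller; the seed value 42 is
   arbitrary. */
static int example_fill_uniform(rnd_struct* rnd, float* host_buf, int rows, int cols) {
  cudamat m;
  init_from_array(&m, host_buf, rows, cols);   // wrap the host buffer
  int err = copy_to_device(&m);                // allocates device memory and uploads
  if (err != 0)
    return err;
  if ((err = init_random(rnd, 42)) == 0)       // seed the multiplier/word streams
    err = fill_with_rand(rnd, &m);             // uniform samples written on the device
  if (err == 0)
    err = copy_to_host(&m);                    // bring the samples back into host_buf
  free_device_memory(&m);
  return err;
}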
int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulli), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleBernoulliTanh), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSamplePoisson), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kSampleGaussian), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbEnergy), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kPerturbProb), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomDropout), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int gaussian_dropout(rnd_struct* rnd_state, cudamat* mat, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kRandomGaussianDropout), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
hipLaunchKernelGGL(( kAddColVector), dim3(num_blocks),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
hipLaunchKernelGGL(( kAddColMult), dim3(num_blocks),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_to_each_pixel(cudamat* mat1, cudamat* mat2, cudamat* target, float mult) {
unsigned int h = mat1->size[0],
w = mat1->size[1],
num_colors = mat2->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans || mat2->is_trans)
return ERROR_TRANSPOSED;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] % mat2->size[1] != 0 ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddToEachPixel), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, mult, w, h, w / num_colors);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonalScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddDiagonal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
hipLaunchKernelGGL(( kAddRowMult), dim3(num_blocks),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
hipLaunchKernelGGL(( kAddRowVector), dim3(num_blocks),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByColVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivByRowVector), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLessThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEq), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThan), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBound), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanEqScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kGreaterThanScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_mod_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kUpperBoundModScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLowerBoundScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxAndAccumulate), dim3(gridDim),dim3(32), 0, 0, mat->data_device, acc->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kChooseMaxColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kArgMaxColumnwise), dim3(gridDim),dim3(32), 0, 0, mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSqSumColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = (h + h1 - 1) / h1;
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSqSumRowwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// Sums all elements with a block-wise reduction kernel, avoiding a dot product with a ones vector.
float sum_all(cudamat* mat, int* err_code) {
unsigned int len = mat->size[0] * mat->size[1];
  if (!mat->on_device) {
    *err_code = ERROR_NOT_ON_DEVICE;
    return 0;
  }
const int max_num_blocks = 8; // Should be around max number of concurrent blocks.
float res_host[max_num_blocks];
float* res_device;
int num_blocks = MIN(max_num_blocks, DIVUP(len, 4 * NUM_VECTOR_OP_THREADS_PER_BLOCK));
hipMalloc((void**)&res_device, num_blocks * sizeof(float));
int shared_mem_size = NUM_VECTOR_OP_THREADS_PER_BLOCK * sizeof(float) ;
int left_over = len % num_blocks;
int len_per_block = len / num_blocks;
hipLaunchKernelGGL(( kSumAll), dim3(num_blocks), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), shared_mem_size, 0, mat->data_device, res_device, len, len_per_block, left_over);
hipMemcpy(res_host, res_device, num_blocks * sizeof(float), hipMemcpyDeviceToHost);
hipFree(res_device);
float val = 0;
for (int i = 0; i < num_blocks; i++) val += res_host[i];
if (checkCUDAError())
*err_code = CUDA_ERROR;
return val;
}
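/* Illustrative usage sketch (not part of the original cudamat API): sum_all()
   writes one partial sum per block into a small device buffer and finishes the
   reduction on the host. The helper below computes the mean of all entries;
   the caller should initialize *err_code to 0, since sum_all() only writes it
   on failure. */
static float example_mean_all(cudamat* mat, int* err_code) {
  float total = sum_all(mat, err_code);              // block-wise reduction on the device
  return total / (mat->size[0] * mat->size[1]);      // divide by the element count
}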
int sum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSumColumnwise), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = DIVUP(h, NUM_VECTOR_OP_THREADS_PER_BLOCK);
int h1 = floor(sqrt(num_blocks));
int h2 = DIVUP(num_blocks, h1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSumRowwise), dim3(gridDim), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm, int constraint) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
if (axis == 0) {
int w1 = floor(sqrt(w));
int w2 = DIVUP(w, w1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kNormLimitColumnwise), dim3(gridDim),dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, norm, w, h, constraint);
} else {
int h1 = floor(sqrt(h));
int h2 = DIVUP(h, h1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kNormLimitRowwise), dim3(gridDim),dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, norm, w, h, constraint);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyCos), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySin), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplySigmoid), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyTanh), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyAbs), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kApplyLog1PlusExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// target = 2 / (1 + exp(-mat * lambda)) - 1
int apply_relu_squash(cudamat* mat, cudamat* target, float lambda) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSquashRelu), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len, lambda);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLog), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCeil), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kFloor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSqrt), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPow), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kPowMatrix), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy(cudamat* dist1, cudamat* dist2, cudamat* target, float tiny) {
unsigned int len = dist1->size[0] * dist1->size[1];
if (!dist1->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (dist1->size[0] != target->size[0] || dist1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (dist1->size[0] != dist2->size[0] || dist1->size[1] != dist2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, dist1->data_device, dist2->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCrossEntropyBernoulli), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCorrectPreds), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, pow->data_device, target->data_device, len, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kReciprocal), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// target = beta * target + alpha * mat1 * mat2
int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = get_leading_dimension(mat1),
k2 = get_nonleading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2),
m2 = get_leading_dimension(target),
n2 = get_nonleading_dimension(target);
if (m != m2 || n != n2 || k != k2) {
printf("%d %d %d %d %d %d\n", m, k2, k, n, m2, n2);
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
hipblasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
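/* Illustrative usage sketch (not part of the original cudamat API): a plain
   matrix product C = A * B through the GEMM wrapper above, with beta = 0 so the
   previous contents of C are discarded and alpha = 1. All three matrices are
   assumed to already live on the device with compatible shapes. */
static int example_matmul(cudamat* A, cudamat* B, cudamat* C) {
  return dot(A, B, C, 0.f, 1.f);    // C = 0*C + 1*(A*B)
}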
int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kSparseDot), dim3(grid), dim3(threads), 0, 0, m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
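// Dot product of mat1 and mat2 treated as flat vectors; errors are reported through *err_code.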
float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = hipblasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipblasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
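// Elementwise update mat1 += mult * sign(mat2) (exact form defined by kAddMultSign).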
int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddMultSign), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSubtract), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivide), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMult), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSinDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kCosDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogisticDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out_grad - output gradient
int apply_logistic_grad(cudamat* mat1, cudamat* mat2, cudamat* out_grad) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !out_grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out_grad->size[0] || mat1->size[1] != out_grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kLogisticGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, out_grad->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out - per-row fraction of correctly classified outputs (as computed by kLogisticCorrectNormalized).
int get_logistic_correct_normalized(cudamat* mat1, cudamat* mat2, cudamat* out) {
if (!mat1->on_device || !mat2->on_device || !out->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out->size[0] || 1 != out->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = DIVUP(mat1->size[0], NUM_VECTOR_OP_THREADS_PER_BLOCK);
hipLaunchKernelGGL(( kLogisticCorrectNormalized), dim3(num_blocks), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, out->data_device, mat1->size[0], mat1->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kTanhDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRectifiedLinearSmoothDeriv), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
hipLaunchKernelGGL(( kAssignScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int write_at(cudamat* mat, int row, int col, float val) {
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (row >= mat->size[0] || col >= mat->size[1] || row < 0 || col < 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipMemcpy(mat->data_device + col * mat->size[0] + row, &val, sizeof(float), hipMemcpyHostToDevice);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float read_from(cudamat* mat, int row, int col, int* err_code) {
*err_code = 0;
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (row >= mat->size[0] || col >= mat->size[1] || row < 0 || col < 0) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
float val = 0;
hipMemcpy(&val, mat->data_device + col * mat->size[0] + row, sizeof(float), hipMemcpyDeviceToHost);
if (checkCUDAError())
*err_code = CUDA_ERROR;
return val;
}
int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kMultScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kDivideScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAddScalar), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
float res = hipblasSnrm2(len, mat->data_device, 1);
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
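// Gather: copy the rows of `source` listed in `indices` (a 1 x nRetRows vector of row numbers) into `target`.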
int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSelectRows), dim3(gridDim), dim3(blockDim), 0, 0, source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
hipLaunchKernelGGL(( kSwapColumns), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int shuffleColumns(cudamat* source, cudamat* rand_perm_indices) {
const int h = source->size[0],
w = source->size[1];
if (rand_perm_indices->size[0] != 1 || rand_perm_indices->size[1] != w) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
hipLaunchKernelGGL(( kShuffleColumns), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, source->data_device, rand_perm_indices->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
hipLaunchKernelGGL(( kSetSelectedRows), dim3(gridDim), dim3(blockDim), 0, 0, target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kGenerateTranslationsBigVarOff), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
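// Extract a patch_width x patch_height crop from each image at its (width_offset, height_offset),
// optionally flipped horizontally; `images` holds one image per column with num_colors color planes.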
int extract_patches(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height) {
int num_images = images->size[1];
int num_colors = images->size[0] / (img_width * img_height);
if (patches->size[1] != num_colors * patch_width * patch_height || patches->size[0] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int grid_y = patch_height / COPY_BLOCK_SIZE;
if (patch_height % COPY_BLOCK_SIZE)
grid_y++;
unsigned int grid_x = patch_width / COPY_BLOCK_SIZE;
if (patch_width % COPY_BLOCK_SIZE)
grid_x++;
dim3 grid(grid_x, grid_y, num_images * num_colors);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE);
hipLaunchKernelGGL(( kExtractPatches2), dim3(grid), dim3(threads), 0, 0,
images->data_device, patches->data_device, width_offset->data_device,
height_offset->data_device, flip->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
//*/
/*
kExtractPatches<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
images->data_device, patches->data_device, indices->data_device, width_offset->data_device,
height_offset->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
*/
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int rectify_bounding_boxes(cudamat* boxes, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int patch_width, int patch_height) {
int num_images = boxes->size[0];
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_locs = boxes->size[1] / 4;
dim3 grid(MIN(NUM_VECTOR_OP_BLOCKS, num_locs));
dim3 threads(MIN(NUM_VECTOR_OP_THREADS_PER_BLOCK, num_images));
hipLaunchKernelGGL(( kRectifyBoundingBox), dim3(grid), dim3(threads), 0, 0,
boxes->data_device, width_offset->data_device, height_offset->data_device,
flip->data_device, num_images, patch_width, patch_height, num_locs);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
hipLaunchKernelGGL(( kBlockify), dim3(kernelBlockGrid), dim3(kernelBlockDim), 0, 0, source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
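// Column-wise softmax of `mat` into `target`; the w columns are spread over a roughly square
// w1 x w2 grid, one 32-thread block per column.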
int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMax), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxOverwrite), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
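// Row-major variants: `mat` is treated as rows of length `numslices` (size[1] for softmax_row_major)
// and the softmax is taken along each row instead of each column.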
int softmax_row_major(cudamat* mat) {
return softmax_row_major_multi(mat, mat->size[1]);
}
int softmax_row_major_multi(cudamat* mat, int numslices) {
unsigned int len = mat->size[0] * mat->size[1];
unsigned int h = len / numslices;
if (len % numslices != 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxRowMajor), dim3(gridDim), dim3(32), shared_mem_size, 0, mat->data_device, numslices, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGradCLS), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxGradRowMajor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
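// Row-major hinge loss of each row of `mat` against its integer label; quadratic != 0 selects the
// squared hinge, and `margin` sets the margin.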
int hinge_loss_row_major(cudamat* mat, cudamat* labels, cudamat* target, int quadratic, float margin) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = (h + 31) / 32;
if (quadratic == 1) {
hipLaunchKernelGGL(( kHingeQuadraticRowMajor), dim3(num_blocks), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, margin);
} else {
hipLaunchKernelGGL(( kHingeLinearRowMajor), dim3(num_blocks), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, margin);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_grad_bbox(
cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset,
cudamat* height_offset, cudamat* target, int width, int height, int depth,
float scale_width, float scale_height, int loss_function) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (loss_function == 0) {
//int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
//int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(width, height, depth);
dim3 threads(h, 1, 1);
hipLaunchKernelGGL(( kBoundingBoxLogisticGrad), dim3(grid), dim3(threads), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
} else {
hipLaunchKernelGGL(( kBoundingBoxSoftMaxGrad), dim3(NUM_VECTOR_OP_BLOCKS), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
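// target[j] = 1 if the arg-max of column j of `mat` equals labels[j], else 0 (see kSoftMaxCorrect).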
int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrect), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectRowMajor), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !indices->on_device || !labels->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] * indices->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectCLS), dim3(gridDim), dim3(32), 0, 0, mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height) {
unsigned int h = mat->size[0] * width * height;
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
hipLaunchKernelGGL(( kSoftMaxCorrectBoundingBox), dim3(gridDim), dim3(32), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, mat->size[0], width,
height, depth, scale_width, scale_height, target->data_device);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_logistic_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, float cutoff) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(grid_x, grid_y, depth);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
hipLaunchKernelGGL(( kLogisticCorrectBoundingBox), dim3(grid), dim3(threads), 0, 0,
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
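// Accumulate (or average, when avg != 0) the w columns of `mat` into the w2 columns of `target`
// selected by `indices`, scaled by `mult`.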
int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAccumulateColumns), dim3(h), dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxCrossEntropy), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy_row_major(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != h || labels->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kSoftMaxCrossEntropyRowMajor), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
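// Expand: target[:, j] = source[:, indices[j]] for each of the w2 columns of `target`.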
int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpand), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, indices->data_device, target->data_device, h, w, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kExpandAndAdd), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0, source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
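// AdaGrad-style update: `history` accumulates per-parameter gradient statistics and `grad` is
// rescaled accordingly; `delta` is the damping constant (exact update defined by kAdagrad).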
int adagrad(cudamat* history, cudamat* grad, float delta) {
int len = history->size[0] * history->size[1];
int trans = history->is_trans;
if (!history->on_device || !grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kAdagrad), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
history->data_device, grad->data_device, delta, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int rms_prop(cudamat* history, cudamat* grad, float factor) {
int len = history->size[0] * history->size[1];
int trans = history->is_trans;
if (!history->on_device || !grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
hipLaunchKernelGGL(( kRMSProp), dim3(NUM_VECTOR_OP_BLOCKS),dim3(NUM_VECTOR_OP_THREADS_PER_BLOCK), 0, 0,
history->data_device, grad->data_device, factor, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
#ifdef __cplusplus
}
#endif
| e268c82c7b9e3db48a5a7d472f6d64bdbade9295.cu | /*
Copyright (c) 2009,2010, Volodymyr Mnih
All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted provided that
the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the
following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or
promote products derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
*/
#include <stdio.h>
#include <stdlib.h>
#include <cublas.h>
#include <math.h>
#include "rnd_multipliers_32bit.h"
#include "cudamat_kernels.cuh"
#ifdef __cplusplus
extern "C" {
#endif
#include "cudamat.cuh"
/* ------------------------------ CUBLAS init/shutdown ------------------------------ */
inline bool check_cublas_error() {
cublasStatus status = cublasGetError();
return status != CUBLAS_STATUS_SUCCESS;
}
inline bool checkCUDAError() {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err)
printf("%s\n", cudaGetErrorString( err));
return cudaSuccess != err;
}
const char* get_last_cuda_error() {
cudaError_t err = cudaGetLastError();
return cudaGetErrorString( err);
}
int cublas_init() {
cublasInit();
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
int cublas_shutdown() {
cublasShutdown();
cudaThreadExit();
return 0;
}
int cuda_record_event(cudaEvent_t* t) {
cudaError_t err = cudaEventRecord(*t, 0);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_synchronize_event(cudaEvent_t* t) {
//cudaError_t err = cudaEventSynchronize(*t);
cudaError_t err = cudaStreamWaitEvent(NULL, *t, 0);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_create_event(cudaEvent_t* t) {
//cudaError_t err = cudaEventCreateWithFlags(t, cudaEventBlockingSync);
cudaError_t err = cudaEventCreate(t);
if (cudaSuccess != err) {
printf("%s\n", cudaGetErrorString( err));
}
return cudaSuccess != err;
}
int cuda_set_device(int deviceId) {
cudaSetDevice(deviceId);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
bool cuda_is_fermi(int deviceId) {
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, deviceId);
return prop.major >= 2;
}
int cuda_set_P2P(int gpu1, int gpu2) {
bool is_fermi = cuda_is_fermi(gpu1) && cuda_is_fermi(gpu2);
int access2from1, access1from2;
cudaDeviceCanAccessPeer(&access2from1, gpu1, gpu2);
cudaDeviceCanAccessPeer(&access1from2, gpu2, gpu1);
//printf("%d can access %d : %d\n ", gpu1, gpu2, access2from1);
//printf("%d can access %d : %d\n ", gpu2, gpu1, access1from2);
bool same_complex = false;
if(access2from1==1 && access1from2==1) same_complex = true;
if(is_fermi && same_complex) {
cudaSetDevice(gpu1);
cudaDeviceEnablePeerAccess(gpu2, 0); //second argument is flags
cudaSetDevice(gpu2);
cudaDeviceEnablePeerAccess(gpu1, 0); //second argument is flags
return 0;
} else {
return CUDA_ERROR;
}
}
int destroy_tex(cudamat* mat) {
if (mat->tex_obj != 0) {
cudaError_t err = cudaDestroyTextureObject(mat->tex_obj);
if (cudaSuccess != err) {
return CUDA_ERROR;
} else {
mat->tex_obj = 0;
return 0;
}
}
return 0;
}
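// Seed NUM_RND_STREAMS independent random-number streams on the device from the baked-in
// multiplier table and the caller-supplied seed.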
int init_random(rnd_struct* rnd_state, int seed) {
unsigned int * host_mults;
host_mults = (unsigned int*)malloc(NUM_RND_STREAMS * sizeof(unsigned int));
for (int i = 0; i < NUM_RND_STREAMS; i++) {
host_mults[i] = _rand_words[i];
}
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned int), (void**)&rnd_state->dev_mults);
cublasAlloc(NUM_RND_STREAMS, sizeof(unsigned long long), (void**)&rnd_state->dev_words);
cublasSetVector(NUM_RND_STREAMS, sizeof(unsigned int), host_mults, 1, rnd_state->dev_mults, 1);
free(host_mults);
cudaDeviceSynchronize();
kSeedRandom<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, seed);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
// Allocates and gives up ownership of pointer. Caller must free.
int get_rnd_state(rnd_struct* rnd_state, unsigned long long* host_words_out, int *size_out) {
*size_out = NUM_RND_STREAMS;
host_words_out = (unsigned long long*)malloc(NUM_RND_STREAMS * sizeof(unsigned long long));
if (host_words_out == NULL) {
return ERROR_GENERIC; // Out of memory.
}
cublasGetVector(NUM_RND_STREAMS, sizeof(unsigned long long), rnd_state->dev_words, 1, host_words_out, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
else
return 0;
}
/* ------------------------------ Utility routines ------------------------------ */
int get_leading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[1] : mat->size[0];
}
int get_nonleading_dimension(cudamat* mat) {
return mat->is_trans ? mat->size[0] : mat->size[1];
}
void set_transpose(cudamat* mat, int is_trans) {
mat->is_trans = is_trans;
}
inline char get_transpose_char(cudamat* mat) {
return mat->is_trans ? 't' : 'n';
}
void cuda_sync_threads() {
cudaDeviceSynchronize();
}
/* ------------------------------ Allocating/moving data ------------------------------ */
int allocate_device_memory(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
cublasStatus stat;
stat = cublasAlloc(len, sizeof(mat->data_device[0]), (void**)&mat->data_device);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_bbox(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
cublasStatus stat;
stat = cublasAlloc(size, sizeof(int), (void**)&mat->data_device.seg);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(numboxes, sizeof(int), (void**)&mat->data_device.labels);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(4 * numboxes, sizeof(int), (void**)&mat->data_device.boxes);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int allocate_device_memory_sparse(cudamat_sparse* mat) {
int nnz = mat->nnz, rows = mat->size[0];
cublasStatus stat;
stat = cublasAlloc(nnz, sizeof(mat->data_device.data[0]), (void**)&mat->data_device.data);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(nnz, sizeof(mat->data_device.indices[0]), (void**)&mat->data_device.indices);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
stat = cublasAlloc(rows + 1, sizeof(mat->data_device.indptr[0]), (void**)&mat->data_device.indptr);
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error()) {
checkCUDAError();
return CUBLAS_ERROR;
}
mat->on_device = 1;
return 0;
}
int copy_to_host_slice(cudamat* mat, size_t start, size_t end) {
if (start >= end || end > mat->size[1])
return ERROR_GENERIC;
size_t len = mat->size[0] * (end - start);
size_t offset = mat->size[0] * start;
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device + offset, 1, mat->data_host + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_to_host(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
if (mat->on_device) {
cublasGetVector(len, sizeof(mat->data_host[0]), mat->data_device, 1, mat->data_host, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
} else
return ERROR_NOT_ON_DEVICE;
return 0;
}
int copy_bbox_to_host(cudamat_bbox* mat) {
if (mat->on_device) {
cublasGetVector(mat->size, sizeof(int), mat->data_device.seg, 1, mat->data_host.seg, 1);
cublasGetVector(mat->numboxes, sizeof(int), mat->data_device.labels, 1, mat->data_host.labels, 1);
cublasGetVector(4 * mat->numboxes, sizeof(int), mat->data_device.boxes, 1, mat->data_host.boxes, 1);
if (check_cublas_error()) return CUBLAS_ERROR;
} else {
return ERROR_NOT_ON_DEVICE;
}
return 0;
}
int copy_to_device_slice(cudamat* mat, size_t start, size_t end) {
if (end <= start || end > mat->size[1])
return ERROR_GENERIC;
size_t len = mat->size[0] * (end - start);
int err_code = 0;
size_t offset = mat->size[0] * start;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host + offset, 1, mat->data_device + offset, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_to_device(cudamat* mat) {
size_t len = mat->size[0]*mat->size[1];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host[0]), mat->data_host, 1, mat->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_bbox_to_device(cudamat_bbox* mat) {
int size = mat->size;
int numboxes = mat->numboxes;
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_bbox(mat);
if (err_code)
return err_code;
}
cublasSetVector(size, sizeof(int), mat->data_host.seg, 1, mat->data_device.seg, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(numboxes, sizeof(int), mat->data_host.labels, 1, mat->data_device.labels, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(4 * numboxes, sizeof(int), mat->data_host.boxes, 1, mat->data_device.boxes, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int copy_sparse_to_device(cudamat_sparse* mat) {
int len = mat->nnz, rows = mat->size[0];
int err_code = 0;
//if (!mat->owns_data)
// return VIEW_ERROR;
if (!mat->on_device) {
err_code = allocate_device_memory_sparse(mat);
if (err_code)
return err_code;
}
cublasSetVector(len, sizeof(mat->data_host.data[0]), mat->data_host.data, 1, mat->data_device.data, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(len, sizeof(mat->data_host.indices[0]), mat->data_host.indices, 1, mat->data_device.indices, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
cublasSetVector(rows + 1, sizeof(mat->data_host.indptr[0]), mat->data_host.indptr, 1, mat->data_device.indptr, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
// mat 1 : source
// mat 2 : dest
int copy_on_device(cudamat* mat1, cudamat* mat2) {
int len = mat1->size[0]*mat1->size[1];
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cudaMemcpy(mat2->data_device, mat1->data_device, len * sizeof(float), cudaMemcpyDefault);
//cublasScopy(len, mat1->data_device, 1, mat2->data_device, 1);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_on_device_p2p_async(cudamat* src, cudamat* dst, int src_dev, int dst_dev) {
int len = src->size[0]*src->size[1];
if (src->size[0] != dst->size[0] || src->size[1] != dst->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cudaMemcpyPeerAsync(dst->data_device, dst_dev, src->data_device, src_dev, len * sizeof(float));
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int get_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = source->size[0];
int width = source->size[1];
if ((end - start) != target->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kGetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int set_row_slice(cudamat* source, cudamat* target, unsigned int start, unsigned int end) {
int height = target->size[0];
int width = target->size[1];
if ((end - start) != source->size[0] || source->size[1] != target->size[1] || start >= end || end > height)
return ERROR_INCOMPATIBLE_DIMENSIONS;
dim3 kernelBlockGrid((int)ceil((end - start)/32.), (int)ceil(width/32.), 1);
dim3 kernelBlockDim(32, 1, 1);
kSetRowSlice<<<kernelBlockGrid,kernelBlockDim>>>(source->data_device, target->data_device, start, end, width, height);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int copy_transpose_big_matrix(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kTransposeBig<<< NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
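// Tiled transpose using COPY_BLOCK_SIZE x COPY_BLOCK_SIZE thread blocks; copy_transpose_big_matrix
// above avoids the 2-D tile grid, presumably for matrices whose tile grid would exceed launch limits.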
int copy_transpose(cudamat* source, cudamat* target) {
unsigned int height = source->size[0];
unsigned int width = source->size[1];
if (source->size[0] != target->size[1] || source->size[1] != target->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
// setup execution parameters
unsigned int grid_x = height / COPY_BLOCK_SIZE;
if (height % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = width / COPY_BLOCK_SIZE;
if (width % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kTranspose<<< grid, threads >>>(target->data_device, source->data_device, height, width);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int free_device_memory(cudamat* mat) {
if (mat->owns_data && mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int free_device_memory_bbox(cudamat_bbox* mat) {
if (mat->on_device) {
cublasStatus stat;
stat = cublasFree(mat->data_device.seg);
stat = cublasFree(mat->data_device.labels);
stat = cublasFree(mat->data_device.boxes);
mat->on_device = 0;
if (stat != CUBLAS_STATUS_SUCCESS || check_cublas_error())
return CUBLAS_ERROR;
}
return 0;
}
int set_shape(cudamat* mat, unsigned int m, unsigned int n) {
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
int set_shape4d(Shape4D* shape, unsigned int s1, unsigned int s2, unsigned s3, unsigned s4) {
shape->shape[0] = s1;
shape->shape[1] = s2;
shape->shape[2] = s3;
shape->shape[3] = s4;
return 0;
}
int reshape(cudamat* mat, int m, int n) {
if (m < 0 && n < 0)
return ERROR_GENERIC;
if (m < 0)
m = (mat->size[0] * mat->size[1]) / n;
if (n < 0)
n = (mat->size[0] * mat->size[1]) / m;
if (mat->size[0] * mat->size[1] != m * n)
return ERROR_INCOMPATIBLE_DIMENSIONS;
mat->size[0] = m;
mat->size[1] = n;
return 0;
}
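// Create a zero-copy view of columns [first_col, last_col); the view shares storage with `source`
// and does not own it (owns_data = 0).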
int get_slice(cudamat* source, cudamat* target, unsigned int first_col, unsigned int last_col) {
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (last_col > source->size[1] || (first_col >= last_col))
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = source->data_host + first_col * num_rows;
target->data_device = source->data_device + first_col * num_rows;
target->on_device = 1;
target->on_host = 0;
target->size[0] = source->size[0];
target->size[1] = last_col - first_col;
target->is_trans = 0;
target->owns_data = 0;
target->tex_obj = 0;
return 0;
}
int get_vector_slice(cudamat* source, cudamat* target, unsigned int first_ind, unsigned int last_ind) {
// source must be a vector.
if (source->size[0] > 1 && source->size[1] > 1)
return ERROR_GENERIC;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (!source->on_device)
return ERROR_NOT_ON_DEVICE;
if (first_ind >= last_ind)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_rows = source->size[0];
target->data_host = 0;
target->data_device = source->data_device + first_ind * num_rows;
target->on_device = 1;
target->on_host = 0;
target->is_trans = 0;
target->owns_data = 0;
target->tex_obj = 0;
if (source->size[0] > 1) {
if (last_ind > source->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = last_ind - first_ind;
target->size[1] = 1;
} else {
if (last_ind > source->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
target->size[0] = 1;
target->size[1] = last_ind - first_ind;
}
return 0;
}
/* ------------------------------ Initialization routines ------------------------------ */
void init_from_array(cudamat* mat, float* data, int m, int n) {
mat->data_host = data;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->tex_obj = 0;
}
void init_from_sparse_array(cudamat_sparse* mat, float* data, int* indices, int* indptr, int m, int n, int nnz) {
mat->data_host.data = data;
mat->data_host.indices = indices;
mat->data_host.indptr = indptr;
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 1;
mat->is_trans = 0;
mat->owns_data = 1;
mat->nnz = nnz;
}
void set_on_device(cudamat* mat) {
mat->on_device = 1;
}
int init_empty(cudamat* mat, int m, int n) {
mat->size[0] = m;
mat->size[1] = n;
mat->on_device = 0;
mat->on_host = 0;
mat->is_trans = 0;
mat->owns_data = 1;
mat->tex_obj = 0;
return allocate_device_memory(mat);
}
/* ------------------------------ Random number generation ------------------------------ */
int fill_with_rand(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomUniform<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int fill_with_randn(rnd_struct* rnd_state, cudamat* mat) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulli<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_bernoulli_tanh(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleBernoulliTanh<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_poisson(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSamplePoisson<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int sample_gaussian(rnd_struct* rnd_state, cudamat* mat, cudamat* target, float mult) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kSampleGaussian<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len, mult);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_energy(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbEnergy<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int perturb_prob(rnd_struct* rnd_state, cudamat* mat, cudamat* target) {
int len = mat->size[0] * mat->size[1];
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kPerturbProb<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
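// Randomized dropout: each element is replaced by `val` with probability dropprob, and surviving
// elements are scaled by `scale` (see kRandomDropout).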
int dropout(rnd_struct* rnd_state, cudamat* mat, float dropprob, float val, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, dropprob, val, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int gaussian_dropout(rnd_struct* rnd_state, cudamat* mat, float scale) {
int len = mat->size[0] * mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kRandomGaussianDropout<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(rnd_state->dev_mults, rnd_state->dev_words, mat->data_device, len, scale);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
/* ------------------------------ Algebraic operations ------------------------------ */
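// Add the column vector `vec` to every column of `mat`, writing the result into `target`.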
int add_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
kAddColVector<<<num_blocks,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int add_col_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
kAddColMult<<<num_blocks,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_to_each_pixel(cudamat* mat1, cudamat* mat2, cudamat* target, float mult) {
unsigned int h = mat1->size[0],
w = mat1->size[1],
num_colors = mat2->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans || mat2->is_trans)
return ERROR_TRANSPOSED;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] % mat2->size[1] != 0 ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddToEachPixel<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, mult, w, h, w / num_colors);
if (checkCUDAError()) {
return CUDA_ERROR;
}
return 0;
}
int mult_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal_scalar(cudamat* mat, float val, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonalScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_diagonal(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[1] * vec->size[0] ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddDiagonal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_mult(cudamat* mat, cudamat* vec, cudamat* target, float mult) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
kAddRowMult<<<num_blocks,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, mult, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int num_blocks = DIVUP((w * h), (NUM_VECTOR_OP_LOOPS_PER_THREAD * NUM_VECTOR_OP_THREADS_PER_BLOCK));
num_blocks = MIN(NUM_VECTOR_OP_BLOCKS, num_blocks);
kAddRowVector<<<num_blocks,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int mult_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_col_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[0] != vec->size[0] || vec->size[1] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByColVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int div_by_row_vec(cudamat* mat, cudamat* vec, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !vec->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (mat->size[1] != vec->size[1] || vec->size[0] != 1 ||
mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivByRowVector<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, vec->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int less_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLessThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEq<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThan<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBound<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_eq_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanEqScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int greater_than_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kGreaterThanScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int upper_bound_mod_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kUpperBoundModScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int lower_bound_scalar(cudamat* mat, float val, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLowerBoundScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, val, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
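/* Axis reductions. The launches below use roughly one 32-thread block per
 * column (or row); the grid is a near-square factorization of the column/row
 * count (w1 = floor(sqrt(w)), w2 = ceil(w / w1)) so that very wide matrices
 * stay within the per-dimension grid limit. Unsupported axes return
 * ERROR_UNSUPPORTED. */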
int max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_and_accumulate(cudamat* mat, cudamat* acc) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !acc->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (acc->size[0] != mat->size[0] || acc->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxAndAccumulate<<<gridDim,32>>>(mat->data_device, acc->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int choose_max_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kChooseMaxColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int argmax_by_axis(cudamat* mat, cudamat* target, int axis) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kArgMaxColumnwise<<<gridDim,32>>>(mat->data_device, target->data_device, w, h);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
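// Column-wise (axis == 0) or row-wise (axis == 1) sum of squares; mult and p
// are scaling factors forwarded to kSqSumColumnwise / kSqSumRowwise.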
int sqsum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
kSqSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = (h + h1 - 1) / h1;
dim3 gridDim(h1, h2, 1);
kSqSumRowwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// Avoids using ones vector.
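// Each block reduces a contiguous chunk of the matrix into res_device; the
// partial block sums are then copied back and added up on the host.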
float sum_all(cudamat* mat, int* err_code) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
const int max_num_blocks = 8; // Should be around max number of concurrent blocks.
float res_host[max_num_blocks];
float* res_device;
int num_blocks = MIN(max_num_blocks, DIVUP(len, 4 * NUM_VECTOR_OP_THREADS_PER_BLOCK));
cudaMalloc((void**)&res_device, num_blocks * sizeof(float));
int shared_mem_size = NUM_VECTOR_OP_THREADS_PER_BLOCK * sizeof(float) ;
int left_over = len % num_blocks;
int len_per_block = len / num_blocks;
kSumAll<<<num_blocks, NUM_VECTOR_OP_THREADS_PER_BLOCK, shared_mem_size>>>(mat->data_device, res_device, len, len_per_block, left_over);
cudaMemcpy(res_host, res_device, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(res_device);
float val = 0;
for (int i = 0; i < num_blocks; i++) val += res_host[i];
if (checkCUDAError())
*err_code = CUDA_ERROR;
return val;
}
int sum_by_axis(cudamat* mat, cudamat* target, int axis, float mult, float p) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (axis == 0) {
if (target->size[0] != 1 || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = (w + w1 - 1) / w1;
dim3 gridDim(w1, w2, 1);
kSumColumnwise<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h, mult, p);
} else if (axis == 1) {
if (target->size[1] != 1 || target->size[0] != mat->size[0])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = DIVUP(h, NUM_VECTOR_OP_THREADS_PER_BLOCK);
int h1 = floor(sqrt(num_blocks));
int h2 = DIVUP(num_blocks, h1);
dim3 gridDim(h1, h2, 1);
kSumRowwise<<<gridDim, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, w, h, mult, p);
} else
return ERROR_UNSUPPORTED;
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
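// Limit the norm of each column (axis == 0) or each row (otherwise) of mat to
// `norm`, writing the result to target; `constraint` selects the constraint
// mode implemented by kNormLimitColumnwise / kNormLimitRowwise.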
int normlimit_by_axis(cudamat* mat, cudamat* target, int axis,
float norm, int constraint) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != mat->size[0] || target->size[1] != mat->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
if (axis == 0) {
int w1 = floor(sqrt(w));
int w2 = DIVUP(w, w1);
dim3 gridDim(w1, w2, 1);
kNormLimitColumnwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h, constraint);
} else {
int h1 = floor(sqrt(h));
int h2 = DIVUP(h, h1);
dim3 gridDim(h1, h2, 1);
kNormLimitRowwise<<<gridDim,32, shared_mem_size>>>(mat->data_device, target->data_device, norm, w, h, constraint);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int sign(cudamat* mat, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans != target->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyCos<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySin<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sigmoid(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplySigmoid<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyTanh<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_abs(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyAbs<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log_1_plus_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kApplyLog1PlusExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// target = 2 / (1 + exp(-mat * lambda)) - 1
int apply_relu_squash(cudamat* mat, cudamat* target, float lambda) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSquashRelu<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, lambda);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_log(cudamat* mat, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLog<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_exp(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_ceil(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCeil<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_floor(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kFloor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sqrt(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSqrt<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow(cudamat* mat, float pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPow<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_pow_matrix(cudamat* mat, cudamat* pow, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kPowMatrix<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy(cudamat* dist1, cudamat* dist2, cudamat* target, float tiny) {
unsigned int len = dist1->size[0] * dist1->size[1];
if (!dist1->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (dist1->size[0] != target->size[0] || dist1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (dist1->size[0] != dist2->size[0] || dist1->size[1] != dist2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(dist1->data_device, dist2->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int compute_cross_entropy_bernoulli(cudamat* mat, cudamat* pow, cudamat* target, float tiny) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCrossEntropyBernoulli<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int correct_preds(cudamat* mat, cudamat* pow, cudamat* target, float cutoff) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCorrectPreds<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, pow->data_device, target->data_device, len, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int reciprocal(cudamat* mat, cudamat* target) {
unsigned int len = mat->size[0] * mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kReciprocal<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// target = beta * target + alpha * mat1 * mat2
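// The leading dimensions passed to cublasSgemm are size[0] because storage is
// column-major; transposedness is communicated through get_transpose_char().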
int dot(cudamat* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = get_leading_dimension(mat1),
k2 = get_nonleading_dimension(mat1),
k = get_leading_dimension(mat2),
n = get_nonleading_dimension(mat2),
m2 = get_leading_dimension(target),
n2 = get_nonleading_dimension(target);
if (m != m2 || n != n2 || k != k2) {
printf("%d %d %d %d %d %d\n", m, k2, k, n, m2, n2);
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
cublasSgemm(get_transpose_char(mat1), get_transpose_char(mat2),
m, n, k,
alpha, mat1->data_device, mat1->size[0],
mat2->data_device, mat2->size[0],
beta, target->data_device, target->size[0]);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int sparse_dot(cudamat_sparse* mat1, cudamat* mat2, cudamat* target, float beta, float alpha) {
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
int m = mat1->size[0],
k = mat1->size[1],
k2 = mat2->size[0],
n = mat2->size[1];
if (k != k2) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
unsigned int grid_x = m / COPY_BLOCK_SIZE;
if (m % COPY_BLOCK_SIZE)
grid_x++;
unsigned int grid_y = n / COPY_BLOCK_SIZE;
if (n % COPY_BLOCK_SIZE)
grid_y++;
dim3 grid(grid_x, grid_y, 1);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kSparseDot<<<grid, threads>>>(m, n, k, mat1->data_device.data,
mat1->data_device.indptr,
mat1->data_device.indices,
mat2->data_device, target->data_device, beta, alpha);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float vdot(cudamat* mat1, cudamat* mat2, int* err_code) {
int len = mat1->size[0]*mat1->size[1];
float res;
if (!mat1->on_device || !mat2->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (mat1->is_trans != mat2->is_trans) {
*err_code = ERROR_TRANSPOSEDNESS;
return 0;
}
if (mat2->size[0] * mat2->size[1] != len) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
res = cublasSdot(len, mat1->data_device, 1, mat2->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
/* Perform the operation mat1 = mat1 + alpha * mat2. mat1 and mat2 must
have the same transposedness. */
int add_mult(cudamat* mat1, cudamat* mat2, float alpha) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
cublasSaxpy(len, alpha, mat2->data_device, 1, mat1->data_device, 1);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int add_mult_sign(cudamat* mat1, cudamat* mat2, float mult) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddMultSign<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, len, mult);
// kAddMultSign is a kernel launch, not a cuBLAS call, so check for CUDA errors.
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int subtract_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSubtract<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivide<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
/* Elementwise multiplication of 2 matrices */
int mult_elementwise(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMult<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_sin_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSinDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_cos_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kCosDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_logistic_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogisticDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out_grad - output gradient
int apply_logistic_grad(cudamat* mat1, cudamat* mat2, cudamat* out_grad) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !out_grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out_grad->size[0] || mat1->size[1] != out_grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kLogisticGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, out_grad->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
// mat1 - output of network
// mat2 - target
// out  - per-row count of correct predictions, normalized (computed by kLogisticCorrectNormalized).
int get_logistic_correct_normalized(cudamat* mat1, cudamat* mat2, cudamat* out) {
if (!mat1->on_device || !mat2->on_device || !out->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != out->size[0] || 1 != out->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = DIVUP(mat1->size[0], NUM_VECTOR_OP_THREADS_PER_BLOCK);
kLogisticCorrectNormalized<<<num_blocks, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, out->data_device, mat1->size[0], mat1->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_tanh_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kTanhDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_rectified_linear_smooth_deriv(cudamat* mat1, cudamat* mat2, cudamat* target) {
int len = mat1->size[0]*mat1->size[1];
if (!mat1->on_device || !mat2->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat1->is_trans != mat2->is_trans)
return ERROR_TRANSPOSEDNESS;
if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] ||
mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRectifiedLinearSmoothDeriv<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat1->data_device, mat2->data_device, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int assign_scalar(cudamat* mat, float alpha) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
kAssignScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int write_at(cudamat* mat, int row, int col, float val) {
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (row >= mat->size[0] || col >= mat->size[1] || row < 0 || col < 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
cudaMemcpy(mat->data_device + col * mat->size[0] + row, &val, sizeof(float), cudaMemcpyHostToDevice);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float read_from(cudamat* mat, int row, int col, int* err_code) {
*err_code = 0;
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return 0;
}
if (row >= mat->size[0] || col >= mat->size[1] || row < 0 || col < 0) {
*err_code = ERROR_INCOMPATIBLE_DIMENSIONS;
return 0;
}
float val = 0;
cudaMemcpy(&val, mat->data_device + col * mat->size[0] + row, sizeof(float), cudaMemcpyDeviceToHost);
if (checkCUDAError())
*err_code = CUDA_ERROR;
return val;
}
int mult_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kMultScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int divide_by_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kDivideScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int add_scalar(cudamat* mat, float alpha, cudamat* target) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAddScalar<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, alpha, target->data_device, len);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
float euclid_norm(cudamat* mat, int* err_code) {
int len = mat->size[0]*mat->size[1];
if (!mat->on_device) {
*err_code = ERROR_NOT_ON_DEVICE;
return -1.;
}
float res = cublasSnrm2(len, mat->data_device, 1);
if (check_cublas_error()) {
*err_code = CUBLAS_ERROR;
return -1.;
} else {
*err_code = 0;
return res;
}
}
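// Row gather/scatter helpers: row indices are stored as floats in a cudamat
// whose size[1] gives the number of rows to move.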
int selectRows(cudamat* source, cudamat* target, cudamat* indices){
const int nRetRows = indices->size[1];
if (nRetRows==0) return 0;
dim3 gridDim((nRetRows+31)/32);
dim3 blockDim(32);
kSelectRows<<<gridDim, blockDim>>>(source->data_device, target->data_device, indices->data_device, nRetRows, source->size[0], source->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int swapColumns(cudamat* source, cudamat* target, cudamat* indices1, cudamat* indices2){
const int cols = indices1->size[1]*indices1->size[0],
h = source->size[0],
w = source->size[1];
kSwapColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, target->data_device, indices1->data_device, indices2->data_device, cols, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int shuffleColumns(cudamat* source, cudamat* rand_perm_indices) {
const int h = source->size[0],
w = source->size[1];
if (rand_perm_indices->size[0] != 1 || rand_perm_indices->size[1] != w) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
kShuffleColumns<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, source->data_device, rand_perm_indices->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int setSelectedRows(cudamat* target, cudamat* source, cudamat* indices){
const int nSetRows = indices->size[1];
if (nSetRows==0)
return 0;
dim3 gridDim((nSetRows+31)/32);
dim3 blockDim(32);
kSetSelectedRows<<<gridDim, blockDim>>>(target->data_device, source->data_device, indices->data_device, nSetRows, target->size[0], target->size[1]);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int generate_translations_big_var_off(cudamat* source, cudamat* target, cudamat* off_x, cudamat* off_y, int source_w, int target_w, int num_channels) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kGenerateTranslationsBigVarOff<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, off_x->data_device, off_y->data_device, source_w, target_w, num_channels);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
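// Crop one patch per image (optionally flipped, per the flip argument):
// patches is laid out as num_images x (num_colors * patch_width * patch_height),
// and width_offset, height_offset and flip each hold one value per image.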
int extract_patches(cudamat* images, cudamat* patches, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int img_width, int img_height, int patch_width, int patch_height) {
int num_images = images->size[1];
int num_colors = images->size[0] / (img_width * img_height);
if (patches->size[1] != num_colors * patch_width * patch_height || patches->size[0] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
unsigned int grid_y = patch_height / COPY_BLOCK_SIZE;
if (patch_height % COPY_BLOCK_SIZE)
grid_y++;
unsigned int grid_x = patch_width / COPY_BLOCK_SIZE;
if (patch_width % COPY_BLOCK_SIZE)
grid_x++;
dim3 grid(grid_x, grid_y, num_images * num_colors);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE);
kExtractPatches2<<<grid, threads>>>(
images->data_device, patches->data_device, width_offset->data_device,
height_offset->data_device, flip->data_device, num_images, img_width, img_height,
patch_width, patch_height, num_colors);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int rectify_bounding_boxes(cudamat* boxes, cudamat* width_offset, cudamat* height_offset, cudamat* flip, int patch_width, int patch_height) {
int num_images = boxes->size[0];
if (width_offset->size[0] * width_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (height_offset->size[0] * height_offset->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (flip->size[0] * flip->size[1] != num_images)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_locs = boxes->size[1] / 4;
dim3 grid(MIN(NUM_VECTOR_OP_BLOCKS, num_locs));
dim3 threads(MIN(NUM_VECTOR_OP_THREADS_PER_BLOCK, num_images));
kRectifyBoundingBox<<<grid, threads>>>(
boxes->data_device, width_offset->data_device, height_offset->data_device,
flip->data_device, num_images, patch_width, patch_height, num_locs);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int blockify(cudamat* source, cudamat* target, int blocksize) {
dim3 kernelBlockGrid(source->size[1], 1, 1);
dim3 kernelBlockDim(512, 1, 1);
kBlockify<<<kernelBlockGrid, kernelBlockDim>>>(source->data_device, target->data_device, source->size[0], blocksize);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
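// Column-wise softmax: each 32-thread block normalizes one column of mat into
// target; the grid is a near-square factorization of the column count.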
int softmax(cudamat* mat, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMax<<<gridDim, 32, shared_mem_size>>>(mat->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_overwrite(cudamat* mat) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxOverwrite<<<gridDim, 32, shared_mem_size>>>(mat->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int softmax_row_major(cudamat* mat) {
return softmax_row_major_multi(mat, mat->size[1]);
}
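// Treats mat as (len / numslices) rows of numslices entries each and applies a
// softmax to every row in place; len must be divisible by numslices.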
int softmax_row_major_multi(cudamat* mat, int numslices) {
unsigned int len = mat->size[0] * mat->size[1];
unsigned int h = len / numslices;
if (len % numslices != 0)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (!mat->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
int shared_mem_size = 32 * sizeof(float) ;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxRowMajor<<<gridDim, 32, shared_mem_size>>>(mat->data_device, numslices, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGradCLS<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_softmax_grad_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxGradRowMajor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
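// Row-major hinge loss: quadratic == 1 selects the squared hinge
// (kHingeQuadraticRowMajor), otherwise the linear hinge (kHingeLinearRowMajor),
// both with the given margin.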
int hinge_loss_row_major(cudamat* mat, cudamat* labels, cudamat* target, int quadratic, float margin) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int num_blocks = (h + 31) / 32;
if (quadratic == 1) {
kHingeQuadraticRowMajor<<<num_blocks, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h, margin);
} else {
kHingeLinearRowMajor<<<num_blocks, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h, margin);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int apply_grad_bbox(
cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset,
cudamat* height_offset, cudamat* target, int width, int height, int depth,
float scale_width, float scale_height, int loss_function) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (loss_function == 0) {
//int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
//int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(width, height, depth);
dim3 threads(h, 1, 1);
kBoundingBoxLogisticGrad<<<grid, threads>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
} else {
kBoundingBoxSoftMaxGrad<<<NUM_VECTOR_OP_BLOCKS, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device);
}
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int w1 = floor(sqrt(w));
int w2 = w / w1 + (w % w1 == 0 ? 0 : 1);
dim3 gridDim(w1, w2, 1);
kSoftMaxCorrect<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major(cudamat* mat, cudamat* labels, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] * labels->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectRowMajor<<<gridDim, 32>>>(mat->data_device, labels->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_CLS(cudamat* mat, cudamat_bbox* labels, cudamat* indices, cudamat* target) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !indices->on_device || !labels->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] * indices->size[1] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectCLS<<<gridDim, 32>>>(mat->data_device, labels->data_device.labels, indices->data_device, target->data_device, w, h);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height) {
unsigned int h = mat->size[0] * width * height;
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] * target->size[1] != h) {
return ERROR_INCOMPATIBLE_DIMENSIONS;
}
int h1 = floor(sqrt(h));
int h2 = h / h1 + (h % h1 == 0 ? 0 : 1);
dim3 gridDim(h1, h2, 1);
kSoftMaxCorrectBoundingBox<<<gridDim, 32>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, mat->size[0], width,
height, depth, scale_width, scale_height, target->data_device);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_logistic_correct_row_major_bbox(cudamat* mat, cudamat_bbox* bbox, cudamat* indices, cudamat* width_offset, cudamat* height_offset, cudamat* target, int width, int height, int depth, float scale_width, float scale_height, float cutoff) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device || !bbox->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
int grid_y = DIVUP(height, COPY_BLOCK_SIZE);
int grid_x = DIVUP(width, COPY_BLOCK_SIZE) * h;
dim3 grid(grid_x, grid_y, depth);
dim3 threads(COPY_BLOCK_SIZE, COPY_BLOCK_SIZE, 1);
kLogisticCorrectBoundingBox<<<grid, threads>>>(
mat->data_device, bbox->data_device.boxes, bbox->data_device.labels,
bbox->data_device.seg, indices->data_device, width_offset->data_device,
height_offset->data_device, h, width, height, depth, scale_width,
scale_height, target->data_device, cutoff);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
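// Editorial note, not part of the original source: the launch above tiles the
// width x height plane into COPY_BLOCK_SIZE x COPY_BLOCK_SIZE thread blocks,
// replicates that tiling h times along grid.x (one copy per row of mat) and
// maps depth onto grid.z. For example, with a hypothetical COPY_BLOCK_SIZE of
// 16 and width = height = 32, this gives grid_y = 2 and grid_x = 2 * h.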
int accumulate_columns(cudamat* mat, cudamat* indices, cudamat* target, float mult, int avg) {
unsigned int h = mat->size[0],
w = mat->size[1],
w2 = target->size[1];
if (!mat->on_device || !indices->on_device|| !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (NUM_VECTOR_OP_THREADS_PER_BLOCK < w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAccumulateColumns<<<h, NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, indices->data_device, target->data_device, w, w2, h, mult, avg);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != 1 || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != 1 || labels->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxCrossEntropy<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int get_softmax_cross_entropy_row_major(cudamat* mat, cudamat* labels, cudamat* target, float tiny) {
unsigned int h = mat->size[0],
w = mat->size[1];
if (!mat->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (labels->size[0] != h || labels->size[1] != 1)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kSoftMaxCrossEntropyRowMajor<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(mat->data_device, labels->data_device, target->data_device, w, h, tiny);
if (checkCUDAError())
return CUDA_ERROR;
return 0;
}
int expand(cudamat* source, cudamat* indices, cudamat* target){
unsigned int h = source->size[0],
w = source->size[1],
w2 = target->size[1];
if (!source->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (source->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w2)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpand<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, indices->data_device, target->data_device, h, w, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int expand_and_add(cudamat* source, cudamat* mat, cudamat* indices, cudamat* target, float mult){
unsigned int h = source->size[0],
w = source->size[1],
w2 = mat->size[1];
if (!source->on_device || !mat->on_device || !indices->on_device || !target->on_device)
return ERROR_NOT_ON_DEVICE;
if (mat->is_trans)
return ERROR_TRANSPOSED;
if (target->size[0] != h || target->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (indices->size[0] != 1 || indices->size[1] != w)
return ERROR_INCOMPATIBLE_DIMENSIONS;
if (mat->size[0] != h)
return ERROR_INCOMPATIBLE_DIMENSIONS;
kExpandAndAdd<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(source->data_device, mat->data_device, indices->data_device, target->data_device, w, h, mult, w2);
if (checkCUDAError())
return CUDA_ERROR;
else
return 0;
}
int adagrad(cudamat* history, cudamat* grad, float delta) {
int len = history->size[0] * history->size[1];
int trans = history->is_trans;
if (!history->on_device || !grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kAdagrad<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
history->data_device, grad->data_device, delta, len);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
int rms_prop(cudamat* history, cudamat* grad, float factor) {
int len = history->size[0] * history->size[1];
int trans = history->is_trans;
if (!history->on_device || !grad->on_device)
return ERROR_NOT_ON_DEVICE;
if (trans != grad->is_trans)
return ERROR_TRANSPOSEDNESS;
if (len != grad->size[0] * grad->size[1])
return ERROR_INCOMPATIBLE_DIMENSIONS;
kRMSProp<<<NUM_VECTOR_OP_BLOCKS,NUM_VECTOR_OP_THREADS_PER_BLOCK>>>(
history->data_device, grad->data_device, factor, len);
if (check_cublas_error())
return CUBLAS_ERROR;
return 0;
}
#ifdef __cplusplus
}
#endif
|
4efae5e87e5b963951a5da84b892783144d5e7ea.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// backward kernel function: for combiner=sum
template <typename TypeEmbeddingComp>
__global__ void backward_sum_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
for (int i = 0; i < slot_num; i++) {
size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
wgrad[feature_index] = top_grad[feature_index];
}
}
}
__global__ void backward_sum_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
const __half *top_grad, __half *wgrad) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
__half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
for (int i = 0; i < slot_num; i++) {
size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
wgrad2[feature_index] = top_grad2[feature_index];
}
}
}
// backward kernel function: for combiner=mean
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void backward_mean_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
TypeEmbeddingComp *wgrad) {
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
for (int i = 0; i < slot_num; i++) {
size_t feature_row_index = bid * slot_num + i;
int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
float scaler = 1.0f;
if (value_num > 1) {
        scaler = 1.0f / value_num; // partial derivative of MEAN
}
size_t feature_index = feature_row_index * embedding_vec_size + tid;
float g = TypeConvertFunc<float, TypeEmbeddingComp>::convert(top_grad[feature_index]);
g *= scaler;
wgrad[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(g);
}
}
}
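// Editorial note, not part of the original source: worked example of the mean
// scaler above. If a slot holds value_num = 4 keys for a sample, the row_offset
// difference is 4 and scaler = 1/4, so the stored wgrad is a quarter of the
// incoming gradient, which matches the partial derivative of a mean over 4
// values.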
template <typename TypeKey>
__global__ void backward_mean_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeKey *row_offset, const __half *top_grad,
__half *wgrad) {
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
__half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
for (int i = 0; i < slot_num; i++) {
size_t feature_row_index = bid * slot_num + i;
int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
__half2 scaler = __float2half2_rn(1.0f);
if (value_num > 1) {
        scaler = __float2half2_rn(1.0f / (float)value_num); // partial derivative of MEAN
}
size_t feature_index = feature_row_index * embedding_vec_size + tid;
wgrad2[feature_index] = __hmul2(scaler, top_grad2[feature_index]);
}
}
}
template <typename TypeEmbeddingComp>
void backward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad,
hipStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_sum_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size, slot_num,
embedding_vec_size, top_grad, wgrad);
}
template <>
void backward_sum<__half>(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
const __half *top_grad, __half *wgrad, hipStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
if (embedding_vec_size % 2 == 0) {
const size_t block_size =
        embedding_vec_size / 2; // each thread handles one __half2, i.e. two elements of an embedding vector
hipLaunchKernelGGL(( backward_sum_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size, slot_num, embedding_vec_size / 2, top_grad, wgrad);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_sum_kernel), dim3(grid_size), dim3(block_size), 0, stream, batch_size, slot_num,
embedding_vec_size, top_grad, wgrad);
}
}
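// Editorial note, not part of the original source: worked example of the
// __half2 path above. For embedding_vec_size = 128 the even branch launches 64
// threads per block and each thread copies one __half2 (two consecutive fp16
// values) per slot, instead of a single __half as in the scalar kernel.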
template <typename TypeKey, typename TypeEmbeddingComp>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
TypeEmbeddingComp *wgrad, hipStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_mean_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
}
template <typename TypeKey>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
const TypeKey *row_offset, const __half *top_grad, __half *wgrad,
hipStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
hipLaunchKernelGGL(( backward_mean_align2_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size, slot_size, embedding_vec_size / 2, row_offset, top_grad, wgrad);
} else {
const size_t block_size = embedding_vec_size;
hipLaunchKernelGGL(( backward_mean_kernel), dim3(grid_size), dim3(block_size), 0, stream,
batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
}
}
} // namespace
/**
* backward propagation for DistributedSlotSparseEmbeddingHash
* The first step of backward propagation: computing the wgrad.
* @param batch_size batch size for the current mini-batch computation.
* @param slot_num the number of slots in hash table.
* @param embedding_vec_size embedding vector size.
* @param combiner combiner type: 0-sum, 1-mean
 * @param row_offset_allreduce_tensors row_offsets tensors after all_reduce of multiple GPUs
 * @param embedding_feature_tensors embedding features tensors of multiple GPUs, storing dgrad
* from the top layer
* @param wgrad_tensors wgrad tensors of multi GPUs, the output of this function.
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device
*/
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size, size_t slot_num,
size_t embedding_vec_size, int combiner,
const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensors2<TypeEmbeddingComp> &wgrad_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
if (combiner == 0) // sum
{
backward_sum(batch_size, slot_num, embedding_vec_size, top_grad, wgrad,
local_gpu->get_stream());
} else if (combiner == 1) // mean
{
backward_mean(batch_size, slot_num, embedding_vec_size, row_offset, top_grad, wgrad,
local_gpu->get_stream());
} else {
CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
}
}
return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size,
const std::vector<size_t> &slot_num_per_gpu,
size_t embedding_vec_size, int combiner,
const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensors2<TypeEmbeddingComp> &wgrad_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
if (slot_num_per_gpu[id] == 0) {
continue;
}
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
if (combiner == 0) // sum
{
backward_sum(batch_size, slot_num_per_gpu[id], embedding_vec_size, top_grad, wgrad,
local_gpu->get_stream());
} else if (combiner == 1) // mean
{
backward_mean(batch_size, slot_num_per_gpu[id], embedding_vec_size, row_offset, top_grad,
wgrad, local_gpu->get_stream());
} else {
CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
}
}
return;
}
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR | 4efae5e87e5b963951a5da84b892783144d5e7ea.cu | /*
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "HugeCTR/include/embeddings/sparse_embedding_functors.hpp"
#include "HugeCTR/include/utils.cuh"
#include "HugeCTR/include/utils.hpp"
namespace HugeCTR {
namespace {
// backward kernel function: for combiner=sum
template <typename TypeEmbeddingComp>
__global__ void backward_sum_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
for (int i = 0; i < slot_num; i++) {
size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
wgrad[feature_index] = top_grad[feature_index];
}
}
}
__global__ void backward_sum_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
const __half *top_grad, __half *wgrad) {
int tid = threadIdx.x;
int bid = blockIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
__half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
for (int i = 0; i < slot_num; i++) {
size_t feature_index = (size_t)(bid * slot_num + i) * embedding_vec_size + tid;
wgrad2[feature_index] = top_grad2[feature_index];
}
}
}
// backward kernel function: for combiner=mean
template <typename TypeKey, typename TypeEmbeddingComp>
__global__ void backward_mean_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
TypeEmbeddingComp *wgrad) {
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
for (int i = 0; i < slot_num; i++) {
size_t feature_row_index = bid * slot_num + i;
int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
float scaler = 1.0f;
if (value_num > 1) {
        scaler = 1.0f / value_num; // partial derivative of MEAN
}
size_t feature_index = feature_row_index * embedding_vec_size + tid;
float g = TypeConvertFunc<float, TypeEmbeddingComp>::convert(top_grad[feature_index]);
g *= scaler;
wgrad[feature_index] = TypeConvertFunc<TypeEmbeddingComp, float>::convert(g);
}
}
}
template <typename TypeKey>
__global__ void backward_mean_align2_kernel(int batch_size, int slot_num, int embedding_vec_size,
const TypeKey *row_offset, const __half *top_grad,
__half *wgrad) {
int bid = blockIdx.x;
int tid = threadIdx.x;
if (bid < batch_size && tid < embedding_vec_size) {
const __half2 *top_grad2 = reinterpret_cast<const __half2 *>(top_grad);
__half2 *wgrad2 = reinterpret_cast<__half2 *>(wgrad);
for (int i = 0; i < slot_num; i++) {
size_t feature_row_index = bid * slot_num + i;
int value_num = row_offset[feature_row_index + 1] - row_offset[feature_row_index];
__half2 scaler = __float2half2_rn(1.0f);
if (value_num > 1) {
        scaler = __float2half2_rn(1.0f / (float)value_num); // partial derivative of MEAN
}
size_t feature_index = feature_row_index * embedding_vec_size + tid;
wgrad2[feature_index] = __hmul2(scaler, top_grad2[feature_index]);
}
}
}
template <typename TypeEmbeddingComp>
void backward_sum(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
const TypeEmbeddingComp *top_grad, TypeEmbeddingComp *wgrad,
cudaStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
const size_t block_size = embedding_vec_size;
backward_sum_kernel<<<grid_size, block_size, 0, stream>>>(batch_size, slot_num,
embedding_vec_size, top_grad, wgrad);
}
template <>
void backward_sum<__half>(size_t batch_size, size_t slot_num, size_t embedding_vec_size,
const __half *top_grad, __half *wgrad, cudaStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
if (embedding_vec_size % 2 == 0) {
const size_t block_size =
        embedding_vec_size / 2; // each thread handles one __half2, i.e. two elements of an embedding vector
backward_sum_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size, slot_num, embedding_vec_size / 2, top_grad, wgrad);
} else {
const size_t block_size = embedding_vec_size;
backward_sum_kernel<<<grid_size, block_size, 0, stream>>>(batch_size, slot_num,
embedding_vec_size, top_grad, wgrad);
}
}
template <typename TypeKey, typename TypeEmbeddingComp>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
const TypeKey *row_offset, const TypeEmbeddingComp *top_grad,
TypeEmbeddingComp *wgrad, cudaStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
const size_t block_size = embedding_vec_size;
backward_mean_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
}
template <typename TypeKey>
void backward_mean(size_t batch_size, size_t slot_size, size_t embedding_vec_size,
const TypeKey *row_offset, const __half *top_grad, __half *wgrad,
cudaStream_t stream) {
const size_t grid_size = batch_size; // each block corresponds to a sample
if (embedding_vec_size % 2 == 0) {
const size_t block_size = embedding_vec_size / 2;
backward_mean_align2_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size, slot_size, embedding_vec_size / 2, row_offset, top_grad, wgrad);
} else {
const size_t block_size = embedding_vec_size;
backward_mean_kernel<<<grid_size, block_size, 0, stream>>>(
batch_size, slot_size, embedding_vec_size, row_offset, top_grad, wgrad);
}
}
} // namespace
/**
* backward propagation for DistributedSlotSparseEmbeddingHash
* The first step of backward propagation: computing the wgrad.
* @param batch_size batch size for the current mini-batch computation.
* @param slot_num the number of slots in hash table.
* @param embedding_vec_size embedding vector size.
* @param combiner combiner type: 0-sum, 1-mean
 * @param row_offset_allreduce_tensors row_offsets tensors after all_reduce of multiple GPUs
 * @param embedding_feature_tensors embedding features tensors of multiple GPUs, storing dgrad
* from the top layer
* @param wgrad_tensors wgrad tensors of multi GPUs, the output of this function.
* @param device_resources all gpus device resources.
* @param context gpu device context, for switching device
*/
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size, size_t slot_num,
size_t embedding_vec_size, int combiner,
const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensors2<TypeEmbeddingComp> &wgrad_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
if (combiner == 0) // sum
{
backward_sum(batch_size, slot_num, embedding_vec_size, top_grad, wgrad,
local_gpu->get_stream());
} else if (combiner == 1) // mean
{
backward_mean(batch_size, slot_num, embedding_vec_size, row_offset, top_grad, wgrad,
local_gpu->get_stream());
} else {
CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
}
}
return;
}
template <typename TypeHashKey, typename TypeEmbeddingComp>
void SparseEmbeddingFunctors::backward(size_t batch_size,
const std::vector<size_t> &slot_num_per_gpu,
size_t embedding_vec_size, int combiner,
const Tensors2<TypeHashKey> &row_offset_allreduce_tensors,
const Tensors2<TypeEmbeddingComp> &embedding_feature_tensors,
Tensors2<TypeEmbeddingComp> &wgrad_tensors,
const ResourceManager &resource_manager) {
size_t local_gpu_count = resource_manager.get_local_gpu_count();
CudaDeviceContext context;
for (size_t id = 0; id < local_gpu_count; id++) {
if (slot_num_per_gpu[id] == 0) {
continue;
}
const auto &local_gpu = resource_manager.get_local_gpu(id);
context.set_device(local_gpu->get_device_id());
const TypeEmbeddingComp *top_grad = embedding_feature_tensors[id].get_ptr();
const TypeHashKey *row_offset = row_offset_allreduce_tensors[id].get_ptr();
TypeEmbeddingComp *wgrad = wgrad_tensors[id].get_ptr();
if (combiner == 0) // sum
{
backward_sum(batch_size, slot_num_per_gpu[id], embedding_vec_size, top_grad, wgrad,
local_gpu->get_stream());
} else if (combiner == 1) // mean
{
backward_mean(batch_size, slot_num_per_gpu[id], embedding_vec_size, row_offset, top_grad,
wgrad, local_gpu->get_stream());
} else {
CK_THROW_(Error_t::WrongInput, "Invalid combiner type ");
}
}
return;
}
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, size_t slot_num, size_t embedding_vec_size, int combiner,
const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, float>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<float> &embedding_feature_tensors, Tensors2<float> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<unsigned int, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<unsigned int> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
template void SparseEmbeddingFunctors::backward<long long, __half>(
size_t batch_size, const std::vector<size_t> &slot_num_per_gpu, size_t embedding_vec_size,
int combiner, const Tensors2<long long> &row_offset_allreduce_tensors,
const Tensors2<__half> &embedding_feature_tensors, Tensors2<__half> &wgrad_tensors,
const ResourceManager &resource_manager);
} // namespace HugeCTR |
eb0400dfe2402558d2a3c5d4bbc877afd86c8b2d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Calin Bucur 332CB
// ASC Assignment 3 (Tema 3)
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "test_map.hpp"
#include "gpu_hashtable.hpp"
using namespace std;
// Number of threads in a block
#define BLOCK_SIZE 256
// Hashing function
// Got it from the internet
// Apparently the distribution is quite uniform for reasons beyond my understanding
__device__ int hash_func(int key) {
int hash = key;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = (hash >> 16) ^ hash;
return hash;
}
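// Editorial note, not part of the original source: the table maps a key to a
// bucket as hash_func(key) % capacity and resolves collisions by probing the
// following slots. For instance, with capacity = 8, two distinct keys whose
// mixed hashes are 13 and 21 both land on bucket 5; whichever arrives second
// finds the slot taken by a foreign key and claims bucket 6 instead.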
// Kernel that rehashes every key-value pair from the old table and inserts it into the new one
__global__ void rehash (Data *old_arr, Data *new_arr, int old_size, int new_size) {
// Get the index of the key-value pair the current thread should rehash
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check if the index is within bounds and if there is a pair at that index
if (idx < old_size && old_arr[idx].key != 0) {
// Compute the new hash
int hash = hash_func(old_arr[idx].key);
// Get the position where it should be inserted
hash %= new_size;
// Atomically check if the position is free and insert the key
atomicCAS(&new_arr[hash].key, 0, old_arr[idx].key);
// Check if the key was inserted
if (new_arr[hash].key == old_arr[idx].key) {
// Insert the value
new_arr[hash].value = old_arr[idx].value;
		} else { // Linear probing
			// Get the next possible position
hash++;
// Loop until the pair was inserted
// We have the guarantee the loop will break at some point
while(1) {
// Atomically check if the position is free and insert the key
atomicCAS(&new_arr[hash].key, 0, old_arr[idx].key);
// Check if the key was inserted
if (new_arr[hash].key == old_arr[idx].key) {
// Insert the value
new_arr[hash].value = old_arr[idx].value;
// Break the loop
return;
} else {
// Move to the next hash
// If the end of the table was reached start from 0
hash = (hash + 1) % new_size;
}
}
}
}
}
// Kernel that inserts a batch of key-value pairs in the table
// Uses linear probing for collisions
__global__ void insert(Data *arr, int capacity, int *keys, int *values, int numKeys, int *existing) {
// Get the index of the key-value pair the current thread should insert
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check if the index is in bounds
if (idx < numKeys) {
Data pair;
pair.key = keys[idx];
pair.value = values[idx];
// Compute the hash
int hash = hash_func(pair.key);
// Get the position
hash %= capacity;
// Check if the key is already inserted at that position
if (arr[hash].key == pair.key) {
// Update the value
arr[hash].value = pair.value;
// Decrement the number of pairs inserted
			atomicSub(existing, 1); // atomicSub instead of atomicDec: atomicDec wraps to its bound argument when the old value is 0 or exceeds it
}
else {
// Atomically check if the position is free and insert the key
atomicCAS(&arr[hash].key, 0, pair.key);
if (arr[hash].key == pair.key) {
// Insert the value
arr[hash].value = pair.value;
}
			else { // Linear probing
// Get the next possible position
hash++;
// Loop until the pair was inserted
// We have the guarantee the loop will break at some point
while(1) {
					// If the key already exists at that position
if (arr[hash].key == pair.key) {
// Update the value
arr[hash].value = pair.value;
// Decrement the number of pairs inserted
atomicSub(existing, 1);
return;
} else {
// Atomically check if the position is free and insert the key
atomicCAS(&arr[hash].key, 0, pair.key);
// Check if the key was inserted
if (arr[hash].key == pair.key) {
// Insert the value
arr[hash].value = pair.value;
return;
}
else {
// Go to the next position
hash = (hash + 1) % capacity;
}
}
}
}
}
}
}
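// Editorial note, not part of the original source: the insert kernel above uses
// a claim-then-verify pattern. atomicCAS(&slot.key, 0, key) lets exactly one
// thread claim an empty slot; every competitor then re-reads slot.key. If it
// matches its own key the thread writes the value (a plain insert or an update
// of the same key), otherwise it probes the next position. Example: two threads
// inserting keys 7 and 9 that hash to the same bucket race on the CAS, and the
// loser sees a foreign key and moves on to the neighbouring slot.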
// Kernel that gets the values for a batch of keys
__global__ void get(Data *arr, int *keys, int *values, int capacity, int numKeys) {
// Get the index of the key the current thread should look for
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check that the index is in bounds
if (idx < numKeys) {
int key = keys[idx]; // Get the key
// Compute the hash
int hash = hash_func(key);
// Get the position
hash %= capacity;
// Check if the key is there
if (arr[hash].key == key) {
// Get the value
values[idx] = arr[hash].value;
}
		else { // Linear probing
hash++;
// Check element by element until the key is found
while (1) {
if (arr[hash].key == key) {
values[idx] = arr[hash].value;
break;
}
else {
hash = (hash + 1) % capacity;
}
}
}
}
}
// Hash Table constructor
GpuHashTable::GpuHashTable(int size) {
this->capacity = size;
this->size = 0; // Initially the table is empty
// Allocate the table in the VRAM
glbGpuAllocator->_cudaMalloc((void **)&this->arr, this->capacity * sizeof(Data));
cudaCheckError();
// Set all the positions as empty (a.k.a zero)
hipMemset(this->arr, 0, this->capacity * sizeof(Data));
}
// Hash Table destructor
GpuHashTable::~GpuHashTable() {
glbGpuAllocator->_cudaFree(this->arr);
}
// Resizes the table
void GpuHashTable::reshape(int numBucketsReshape) {
Data *new_arr;
// Allocate the new array of the desired size
glbGpuAllocator->_cudaMalloc((void **)&new_arr, numBucketsReshape * sizeof(Data));
cudaCheckError();
// Initialize all the positions as empty
hipMemset(new_arr, 0, numBucketsReshape * sizeof(Data));
cudaCheckError();
// Calculate the number of blocks necessary
int block_num = this->capacity / BLOCK_SIZE;
if (this->capacity % BLOCK_SIZE)
block_num++;
// Call the kernel
hipLaunchKernelGGL(( rehash), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, this->arr, new_arr, this->capacity, numBucketsReshape);
hipDeviceSynchronize();
cudaCheckError();
// Free the old table
glbGpuAllocator->_cudaFree(this->arr);
cudaCheckError();
// Assign the new table
this->arr = new_arr;
// Update the capacity
this->capacity = numBucketsReshape;
}
// Inserts a batch of key-value pairs
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
// If the capacity would be exceeded
if (this->size + numKeys >= this->capacity) {
// Double the capacity
this->reshape((this->size + numKeys) * 2);
}
// Calculate the number of blocks necessary
int block_num = numKeys / BLOCK_SIZE;
if (numKeys % BLOCK_SIZE)
block_num++;
int *GPU_keys;
int *GPU_values;
int *GPU_numKeys;
// Allocate arrays for the keys and values to be inserted
glbGpuAllocator->_cudaMalloc((void **)&GPU_keys, numKeys * sizeof(int));
cudaCheckError();
glbGpuAllocator->_cudaMalloc((void **)&GPU_values, numKeys * sizeof(int));
cudaCheckError();
// Allocate a pointer for the number of keys inserted
// This will be updated with the number of keys that were actually inserted (not updated)
glbGpuAllocator->_cudaMalloc((void **)&GPU_numKeys, sizeof(int));
cudaCheckError();
// Copy the data into the GPU arrays
hipMemcpy(GPU_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
cudaCheckError();
hipMemcpy(GPU_values, values, numKeys * sizeof(int), hipMemcpyHostToDevice);
cudaCheckError();
hipMemcpy(GPU_numKeys, &numKeys, sizeof(int), hipMemcpyHostToDevice);
cudaCheckError();
// Call the kernel
hipLaunchKernelGGL(( insert), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, this->arr, this->capacity, GPU_keys, GPU_values, numKeys, GPU_numKeys);
hipDeviceSynchronize();
cudaCheckError();
// Get the number of inserted keys
hipMemcpy(&numKeys, GPU_numKeys, sizeof(int), hipMemcpyDeviceToHost);
cudaCheckError();
// Update the size
this->size += numKeys;
// Free the gpu arrays
glbGpuAllocator->_cudaFree(GPU_keys);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_values);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_numKeys);
cudaCheckError();
return true;
}
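// Editorial note, not part of the original source: the reshape policy above
// keeps the load factor at or below roughly 50%. Worked example: inserting a
// batch of 1000 keys into an empty table created with capacity 500 triggers
// reshape((0 + 1000) * 2), i.e. a new capacity of 2000, before the insert
// kernel runs. A minimal, hypothetical usage sketch (keys/values are host
// arrays of 1000 ints):
// GpuHashTable table(500);
// table.insertBatch(keys, values, 1000); // reshapes to 2000, then inserts
// int *results = table.getBatch(keys, 1000); // heap-allocated, caller frees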
// Gets a batch of values corresponding to the given keys
int* GpuHashTable::getBatch(int* keys, int numKeys) {
// Calculate the necessary number of blocks
int block_num = numKeys / BLOCK_SIZE;
if (numKeys % BLOCK_SIZE)
block_num++;
int *GPU_keys;
int *GPU_values;
// Allocate GPU arrays for the keys and values
glbGpuAllocator->_cudaMalloc((void **)&GPU_keys, numKeys * sizeof(int));
cudaCheckError();
glbGpuAllocator->_cudaMalloc((void **)&GPU_values, numKeys * sizeof(int));
cudaCheckError();
// Copy the keys
hipMemcpy(GPU_keys, keys, numKeys * sizeof(int), hipMemcpyHostToDevice);
cudaCheckError();
// Call the kernel
hipLaunchKernelGGL(( get), dim3(block_num), dim3(BLOCK_SIZE), 0, 0, this->arr, GPU_keys, GPU_values, this->capacity, numKeys);
hipDeviceSynchronize();
cudaCheckError();
// Allocate an array for the values
int *values = (int*)malloc(numKeys * sizeof(int));
// Copy the values from the GPU
hipMemcpy(values, GPU_values, numKeys * sizeof(int), hipMemcpyDeviceToHost);
cudaCheckError();
// Free the GPU arrays
glbGpuAllocator->_cudaFree(GPU_keys);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_values);
cudaCheckError();
return values;
}
| eb0400dfe2402558d2a3c5d4bbc877afd86c8b2d.cu | // Calin Bucur 332CB
// ASC Assignment 3 (Tema 3)
#include <iostream>
#include <limits.h>
#include <stdlib.h>
#include <ctime>
#include <sstream>
#include <string>
#include "test_map.hpp"
#include "gpu_hashtable.hpp"
using namespace std;
// Number of threads in a block
#define BLOCK_SIZE 256
// Hashing function
// Got it from the internet
// Apparently the distribution is quite uniform for reasons beyond my understanding
__device__ int hash_func(int key) {
int hash = key;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = ((hash >> 16) ^ hash) * 0x45d9f3b;
hash = (hash >> 16) ^ hash;
return hash;
}
// Kernel that rehashes every key-value pair from the old table and inserts it into the new one
__global__ void rehash (Data *old_arr, Data *new_arr, int old_size, int new_size) {
// Get the index of the key-value pair the current thread should rehash
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check if the index is within bounds and if there is a pair at that index
if (idx < old_size && old_arr[idx].key != 0) {
// Compute the new hash
int hash = hash_func(old_arr[idx].key);
// Get the position where it should be inserted
hash %= new_size;
// Atomically check if the position is free and insert the key
atomicCAS(&new_arr[hash].key, 0, old_arr[idx].key);
// Check if the key was inserted
if (new_arr[hash].key == old_arr[idx].key) {
// Insert the value
new_arr[hash].value = old_arr[idx].value;
		} else { // Linear probing
			// Get the next possible position
hash++;
// Loop until the pair was inserted
// We have the guarantee the loop will break at some point
while(1) {
// Atomically check if the position is free and insert the key
atomicCAS(&new_arr[hash].key, 0, old_arr[idx].key);
// Check if the key was inserted
if (new_arr[hash].key == old_arr[idx].key) {
// Insert the value
new_arr[hash].value = old_arr[idx].value;
// Break the loop
return;
} else {
// Move to the next hash
// If the end of the table was reached start from 0
hash = (hash + 1) % new_size;
}
}
}
}
}
// Kernel that inserts a batch of key-value pairs in the table
// Uses linear probing for collisions
__global__ void insert(Data *arr, int capacity, int *keys, int *values, int numKeys, int *existing) {
// Get the index of the key-value pair the current thread should insert
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check if the index is in bounds
if (idx < numKeys) {
Data pair;
pair.key = keys[idx];
pair.value = values[idx];
// Compute the hash
int hash = hash_func(pair.key);
// Get the position
hash %= capacity;
// Check if the key is already inserted at that position
if (arr[hash].key == pair.key) {
// Update the value
arr[hash].value = pair.value;
// Decrement the number of pairs inserted
			atomicSub(existing, 1); // atomicSub instead of atomicDec: atomicDec wraps to its bound argument when the old value is 0 or exceeds it
}
else {
// Atomically check if the position is free and insert the key
atomicCAS(&arr[hash].key, 0, pair.key);
if (arr[hash].key == pair.key) {
// Insert the value
arr[hash].value = pair.value;
}
			else { // Linear probing
// Get the next possible position
hash++;
// Loop until the pair was inserted
// We have the guarantee the loop will break at some point
while(1) {
					// If the key already exists at that position
if (arr[hash].key == pair.key) {
// Update the value
arr[hash].value = pair.value;
// Decrement the number of pairs inserted
atomicSub(existing, 1);
return;
} else {
// Atomically check if the position is free and insert the key
atomicCAS(&arr[hash].key, 0, pair.key);
// Check if the key was inserted
if (arr[hash].key == pair.key) {
// Insert the value
arr[hash].value = pair.value;
return;
}
else {
// Go to the next position
hash = (hash + 1) % capacity;
}
}
}
}
}
}
}
// Kernel that gets the values for a batch of keys
__global__ void get(Data *arr, int *keys, int *values, int capacity, int numKeys) {
// Get the index of the key the current thread should look for
unsigned int idx = threadIdx.x + blockDim.x * blockIdx.x;
// Check that the index is in bounds
if (idx < numKeys) {
int key = keys[idx]; // Get the key
// Compute the hash
int hash = hash_func(key);
// Get the position
hash %= capacity;
// Check if the key is there
if (arr[hash].key == key) {
// Get the value
values[idx] = arr[hash].value;
}
		else { // Linear probing
hash++;
// Check element by element until the key is found
while (1) {
if (arr[hash].key == key) {
values[idx] = arr[hash].value;
break;
}
else {
hash = (hash + 1) % capacity;
}
}
}
}
}
// Hash Table constructor
GpuHashTable::GpuHashTable(int size) {
this->capacity = size;
this->size = 0; // Initially the table is empty
// Allocate the table in the VRAM
glbGpuAllocator->_cudaMalloc((void **)&this->arr, this->capacity * sizeof(Data));
cudaCheckError();
// Set all the positions as empty (a.k.a zero)
cudaMemset(this->arr, 0, this->capacity * sizeof(Data));
}
// Hash Table destructor
GpuHashTable::~GpuHashTable() {
glbGpuAllocator->_cudaFree(this->arr);
}
// Resizes the table
void GpuHashTable::reshape(int numBucketsReshape) {
Data *new_arr;
// Allocate the new array of the desired size
glbGpuAllocator->_cudaMalloc((void **)&new_arr, numBucketsReshape * sizeof(Data));
cudaCheckError();
// Initialize all the positions as empty
cudaMemset(new_arr, 0, numBucketsReshape * sizeof(Data));
cudaCheckError();
// Calculate the number of blocks necessary
int block_num = this->capacity / BLOCK_SIZE;
if (this->capacity % BLOCK_SIZE)
block_num++;
// Call the kernel
rehash<<<block_num, BLOCK_SIZE>>>(this->arr, new_arr, this->capacity, numBucketsReshape);
cudaDeviceSynchronize();
cudaCheckError();
// Free the old table
glbGpuAllocator->_cudaFree(this->arr);
cudaCheckError();
// Assign the new table
this->arr = new_arr;
// Update the capacity
this->capacity = numBucketsReshape;
}
// Inserts a batch of key-value pairs
bool GpuHashTable::insertBatch(int *keys, int* values, int numKeys) {
// If the capacity would be exceeded
if (this->size + numKeys >= this->capacity) {
// Double the capacity
this->reshape((this->size + numKeys) * 2);
}
// Calculate the number of blocks necessary
int block_num = numKeys / BLOCK_SIZE;
if (numKeys % BLOCK_SIZE)
block_num++;
int *GPU_keys;
int *GPU_values;
int *GPU_numKeys;
// Allocate arrays for the keys and values to be inserted
glbGpuAllocator->_cudaMalloc((void **)&GPU_keys, numKeys * sizeof(int));
cudaCheckError();
glbGpuAllocator->_cudaMalloc((void **)&GPU_values, numKeys * sizeof(int));
cudaCheckError();
// Allocate a pointer for the number of keys inserted
// This will be updated with the number of keys that were actually inserted (not updated)
glbGpuAllocator->_cudaMalloc((void **)&GPU_numKeys, sizeof(int));
cudaCheckError();
// Copy the data into the GPU arrays
cudaMemcpy(GPU_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaCheckError();
cudaMemcpy(GPU_values, values, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaCheckError();
cudaMemcpy(GPU_numKeys, &numKeys, sizeof(int), cudaMemcpyHostToDevice);
cudaCheckError();
// Call the kernel
insert<<<block_num, BLOCK_SIZE>>>(this->arr, this->capacity, GPU_keys, GPU_values, numKeys, GPU_numKeys);
cudaDeviceSynchronize();
cudaCheckError();
// Get the number of inserted keys
cudaMemcpy(&numKeys, GPU_numKeys, sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckError();
// Update the size
this->size += numKeys;
// Free the gpu arrays
glbGpuAllocator->_cudaFree(GPU_keys);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_values);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_numKeys);
cudaCheckError();
return true;
}
// Gets a batch of values corresponding to the given keys
int* GpuHashTable::getBatch(int* keys, int numKeys) {
// Calculate the necessary number of blocks
int block_num = numKeys / BLOCK_SIZE;
if (numKeys % BLOCK_SIZE)
block_num++;
int *GPU_keys;
int *GPU_values;
// Allocate GPU arrays for the keys and values
glbGpuAllocator->_cudaMalloc((void **)&GPU_keys, numKeys * sizeof(int));
cudaCheckError();
glbGpuAllocator->_cudaMalloc((void **)&GPU_values, numKeys * sizeof(int));
cudaCheckError();
// Copy the keys
cudaMemcpy(GPU_keys, keys, numKeys * sizeof(int), cudaMemcpyHostToDevice);
cudaCheckError();
// Call the kernel
get<<<block_num, BLOCK_SIZE>>>(this->arr, GPU_keys, GPU_values, this->capacity, numKeys);
cudaDeviceSynchronize();
cudaCheckError();
// Allocate an array for the values
int *values = (int*)malloc(numKeys * sizeof(int));
// Copy the values from the GPU
cudaMemcpy(values, GPU_values, numKeys * sizeof(int), cudaMemcpyDeviceToHost);
cudaCheckError();
// Free the GPU arrays
glbGpuAllocator->_cudaFree(GPU_keys);
cudaCheckError();
glbGpuAllocator->_cudaFree(GPU_values);
cudaCheckError();
return values;
}
|
0fa2a634d5312a3654ab146fc46e074853c2a74f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../orc.h"
#include "../orc_gpu.h"
#include "orc_reader_impl.hpp"
#include "../timezone.h"
#include <io/comp/gpuinflate.h>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <numeric>
#include <utility>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace orc {
static_assert(sizeof(orc::gpu::CompressedStreamInfo) <= 256 &&
!(sizeof(orc::gpu::CompressedStreamInfo) & 7),
"Unexpected sizeof(CompressedStreamInfo)");
static_assert(sizeof(orc::gpu::ColumnDesc) <= 256 &&
!(sizeof(orc::gpu::ColumnDesc) & 7),
"Unexpected sizeof(ColumnDesc)");
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
**/
constexpr int32_t to_clockrate(gdf_time_unit time_unit) {
switch (time_unit) {
case TIME_UNIT_s:
return 1;
case TIME_UNIT_ms:
return 1000;
case TIME_UNIT_us:
return 1000000;
case TIME_UNIT_ns:
return 1000000000;
default:
return 0;
}
}
/**
* @brief Function that translates ORC datatype to GDF dtype
**/
constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype(
const orc::SchemaType &schema, bool use_np_dtypes, gdf_time_unit ts_unit) {
switch (schema.kind) {
case orc::BOOLEAN:
return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::BYTE:
return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::SHORT:
return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::INT:
return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::LONG:
return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::FLOAT:
return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::DOUBLE:
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to GDF_STRING
return std::make_pair(GDF_STRING, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::TIMESTAMP:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ns});
case orc::DATE:
// There isn't a (GDF_DATE32 -> np.dtype) mapping
return (use_np_dtypes)
? std::make_pair(GDF_DATE64,
gdf_dtype_extra_info{TIME_UNIT_ms})
: std::make_pair(GDF_DATE32,
gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float
static_assert(DECIMALS_AS_FLOAT64 == 1, "Missing decimal->float");
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
default:
break;
}
return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
constexpr std::pair<orc::gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child) {
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(orc::gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(orc::gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA:
return std::make_pair(orc::gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(orc::gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX:
return std::make_pair(orc::gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(orc::gpu::CI_NUM_STREAMS, 0);
}
}
/**
* @brief A helper class for ORC file metadata. Provides some additional
* convenience methods for initializing and accessing metadata.
**/
class OrcMetadata {
using OrcStripeInfo =
std::pair<const orc::StripeInformation *, const orc::StripeFooter *>;
public:
explicit OrcMetadata(datasource *const src) : source(src) {
const auto len = source->size();
const auto max_ps_size = ::min(len, static_cast<size_t>(256));
// Read uncompressed postscript section (max 255 bytes + 1 byte for length)
auto buffer = source->get_buffer(len - max_ps_size, max_ps_size);
const size_t ps_length = buffer->data()[max_ps_size - 1];
const uint8_t *ps_data = &buffer->data()[max_ps_size - ps_length - 1];
orc::ProtobufReader pb;
pb.init(ps_data, ps_length);
CUDF_EXPECTS(pb.read(&ps, ps_length), "Cannot read postscript");
CUDF_EXPECTS(ps.footerLength + ps_length < len, "Invalid footer length");
print_postscript(ps_length);
// If compression is used, all the rest of the metadata is compressed
// If no compressed is used, the decompressor is simply a pass-through
decompressor = std::make_unique<orc::OrcDecompressor>(
ps.compression, ps.compressionBlockSize);
// Read compressed filefooter section
buffer = source->get_buffer(len - ps_length - 1 - ps.footerLength,
ps.footerLength);
size_t ff_length = 0;
auto ff_data = decompressor->Decompress(buffer->data(), ps.footerLength, &ff_length);
pb.init(ff_data, ff_length);
CUDF_EXPECTS(pb.read(&ff, ff_length), "Cannot read filefooter");
CUDF_EXPECTS(get_num_columns() > 0, "No columns found");
print_filefooter();
}
/**
* @brief Filters and reads the info of only a selection of stripes
*
* @param[in] stripe Index of the stripe to select
* @param[in] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of stripe info and total number of selected rows
**/
auto select_stripes(int stripe, int &row_start, int &row_count) {
std::vector<OrcStripeInfo> selection;
if (stripe != -1) {
CUDF_EXPECTS(stripe < get_num_stripes(), "Non-existent stripe");
selection.emplace_back(&ff.stripes[stripe], nullptr);
if (row_count < 0) {
row_count = ff.stripes[stripe].numberOfRows;
} else {
row_count = ::min(row_count, (int)ff.stripes[stripe].numberOfRows);
}
} else {
row_start = ::max(row_start, 0);
if (row_count == -1) {
row_count = get_total_rows();
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
int stripe_skip_rows = 0;
for (int i = 0, count = 0; i < (int)ff.stripes.size(); ++i) {
count += ff.stripes[i].numberOfRows;
if (count > row_start) {
if (selection.size() == 0) {
stripe_skip_rows = row_start - (count - ff.stripes[i].numberOfRows);
}
selection.emplace_back(&ff.stripes[i], nullptr);
}
if (count >= (row_start + row_count)) {
break;
}
}
row_start = stripe_skip_rows;
}
// Read each stripe's stripefooter metadata
if (not selection.empty()) {
orc::ProtobufReader pb;
stripefooters.resize(selection.size());
for (size_t i = 0; i < selection.size(); ++i) {
const auto stripe = selection[i].first;
const auto sf_comp_offset =
stripe->offset + stripe->indexLength + stripe->dataLength;
const auto sf_comp_length = stripe->footerLength;
CUDF_EXPECTS(sf_comp_offset + sf_comp_length < source->size(),
"Invalid stripe information");
const auto buffer = source->get_buffer(sf_comp_offset, sf_comp_length);
size_t sf_length = 0;
auto sf_data = decompressor->Decompress(buffer->data(), sf_comp_length,
&sf_length);
pb.init(sf_data, sf_length);
CUDF_EXPECTS(pb.read(&stripefooters[i], sf_length),
"Cannot read stripefooter");
selection[i].second = &stripefooters[i];
}
}
return selection;
}
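  // Editorial note, not part of the original source: worked example of the
  // row-based stripe selection above. With four stripes of 1000 rows each,
  // row_start = 1500 and row_count = 2000, the loop skips stripe 0, selects
  // stripes 1 to 3 (their cumulative row counts 2000, 3000 and 4000 exceed
  // row_start), sets stripe_skip_rows = 1500 - 1000 = 500, and breaks once the
  // cumulative count reaches or passes row_start + row_count = 3500.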
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[out] has_timestamp_column Whether there is a orc::TIMESTAMP column
*
* @return List of ORC column indexes
**/
auto select_columns(std::vector<std::string> use_names,
bool &has_timestamp_column) {
std::vector<int> selection;
if (not use_names.empty()) {
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < get_num_columns(); ++i, ++index) {
if (index >= get_num_columns()) {
index = 0;
}
if (ff.GetColumnName(index) == use_name) {
selection.emplace_back(index);
index++;
if (ff.types[i].kind == orc::TIMESTAMP) {
has_timestamp_column = true;
}
break;
}
}
}
} else {
// For now, only select all leaf nodes
for (int i = 0; i < get_num_columns(); ++i) {
if (ff.types[i].subtypes.size() == 0) {
selection.emplace_back(i);
if (ff.types[i].kind == orc::TIMESTAMP) {
has_timestamp_column = true;
}
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
inline int get_total_rows() const { return ff.numberOfRows; }
inline int get_num_stripes() const { return ff.stripes.size(); }
inline int get_num_columns() const { return ff.types.size(); }
inline int get_row_index_stride() const { return ff.rowIndexStride; }
private:
void print_postscript(size_t ps_length) const {
LOG_PRINTF("\n[+] PostScript:\n");
LOG_PRINTF(" postscriptLength = %zd\n", ps_length);
LOG_PRINTF(" footerLength = %zd\n", (size_t)ps.footerLength);
LOG_PRINTF(" compression = %d\n", ps.compression);
LOG_PRINTF(" compressionBlockSize = %d\n", ps.compressionBlockSize);
LOG_PRINTF(" version(%zd) = {%d,%d}\n", ps.version.size(),
(ps.version.size() > 0) ? (int32_t)ps.version[0] : -1,
(ps.version.size() > 1) ? (int32_t)ps.version[1] : -1);
LOG_PRINTF(" metadataLength = %zd\n", (size_t)ps.metadataLength);
LOG_PRINTF(" magic = \"%s\"\n", ps.magic.c_str());
}
void print_filefooter() const {
LOG_PRINTF("\n[+] FileFooter:\n");
LOG_PRINTF(" headerLength = %zd\n", ff.headerLength);
LOG_PRINTF(" contentLength = %zd\n", ff.contentLength);
LOG_PRINTF(" stripes (%zd entries):\n", ff.stripes.size());
for (size_t i = 0; i < ff.stripes.size(); i++) {
LOG_PRINTF(
" [%zd] @ %zd: %d rows, index+data+footer: %zd+%zd+%d bytes\n", i,
ff.stripes[i].offset, ff.stripes[i].numberOfRows,
ff.stripes[i].indexLength, ff.stripes[i].dataLength,
ff.stripes[i].footerLength);
}
LOG_PRINTF(" types (%zd entries):\n", ff.types.size());
for (size_t i = 0; i < ff.types.size(); i++) {
LOG_PRINTF(" column [%zd]: kind = %d, parent = %d\n", i,
ff.types[i].kind, ff.types[i].parent_idx);
if (ff.types[i].subtypes.size() > 0) {
LOG_PRINTF(" subtypes = ");
for (size_t j = 0; j < ff.types[i].subtypes.size(); j++) {
LOG_PRINTF("%c%d", (j) ? ',' : '{', ff.types[i].subtypes[j]);
}
LOG_PRINTF("}\n");
}
if (ff.types[i].fieldNames.size() > 0) {
LOG_PRINTF(" fieldNames = ");
for (size_t j = 0; j < ff.types[i].fieldNames.size(); j++) {
LOG_PRINTF("%c\"%s\"", (j) ? ',' : '{',
ff.types[i].fieldNames[j].c_str());
}
LOG_PRINTF("}\n");
}
}
if (ff.metadata.size() > 0) {
LOG_PRINTF(" metadata (%zd entries):\n", ff.metadata.size());
for (size_t i = 0; i < ff.metadata.size(); i++) {
LOG_PRINTF(" [%zd] \"%s\" = \"%s\"\n", i, ff.metadata[i].name.c_str(),
ff.metadata[i].value.c_str());
}
}
LOG_PRINTF(" numberOfRows = %zd\n", ff.numberOfRows);
LOG_PRINTF(" rowIndexStride = %d\n", ff.rowIndexStride);
}
public:
orc::PostScript ps;
orc::FileFooter ff;
std::vector<orc::StripeFooter> stripefooters;
std::unique_ptr<orc::OrcDecompressor> decompressor;
private:
datasource *const source;
};
/**
* @brief Struct that maps ORC streams to columns
**/
struct OrcStreamInfo {
OrcStreamInfo() = default;
explicit OrcStreamInfo(uint64_t offset_, size_t dst_pos_, uint32_t length_,
uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_) {}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to the beginning of the compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // gdf column index
uint32_t stripe_idx; // stripe index
};
size_t reader::Impl::gather_stream_info(
const size_t stripe_index, const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter, const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc, const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<orc::gpu::ColumnDesc> &chunks,
std::vector<OrcStreamInfo> &stream_info) {
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[orc::gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[orc::gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx = get_index_type_and_pos(stream.kind, chunk.skip_count,
col == orc2gdf[stream.column]);
if (idx.first < orc::gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == orc::gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len =
stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries +=
stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(stripeinfo->offset + src_offset, dst_offset,
stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
rmm::device_buffer reader::Impl::decompress_stripe_data(
const hostdevice_vector<orc::gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const orc::OrcDecompressor *decompressor,
std::vector<OrcStreamInfo> &stream_info, size_t num_stripes,
rmm::device_vector<orc::gpu::RowGroup> &row_groups,
size_t row_index_stride) {
// Parse the columns' compressed info
hostdevice_vector<orc::gpu::CompressedStreamInfo> compinfo(0, stream_info.size());
for (const auto &info : stream_info) {
compinfo.insert(orc::gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) +
info.dst_pos,
info.length));
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(), compinfo.host_ptr(),
compinfo.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(ParseCompressedStripeData(
compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio()));
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(), compinfo.device_ptr(),
compinfo.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decompressed_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decompressed_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decompressed_size > 0, "No decompressible data found");
LOG_PRINTF(
"[+] Compression\n Total compressed size: %zd\n Number of "
"compressed blocks: %zd\n Codec: %d\n",
total_decompressed_size, num_compressed_blocks, decompressor->GetKind());
rmm::device_buffer decomp_data(align_size(total_decompressed_size));
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks + num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(hipMemcpyAsync(compinfo.device_ptr(), compinfo.host_ptr(),
compinfo.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(ParseCompressedStripeData(
compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio()));
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(inflate_in.data().get(), inflate_out.data().get(),
num_compressed_blocks, 0));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.data().get(), inflate_out.data().get(),
num_compressed_blocks));
break;
default:
CUDF_EXPECTS(false, "Unexpected decompression dispatch");
break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(inflate_in.data().get() + num_compressed_blocks,
num_uncompressed_blocks));
}
CUDA_TRY(PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size()));
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(hipMemcpyAsync(compinfo.host_ptr(), compinfo.device_ptr(),
compinfo.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < orc::gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(ParseRowGroupIndex(row_groups.data().get(), compinfo.device_ptr(),
chunks.device_ptr(), num_columns, num_stripes,
row_groups.size() / num_columns,
row_index_stride));
}
return decomp_data;
}
void reader::Impl::decode_stream_data(
const hostdevice_vector<orc::gpu::ColumnDesc> &chunks, size_t num_dicts,
size_t skip_rows, const std::vector<int64_t> &timezone_table,
rmm::device_vector<orc::gpu::RowGroup> &row_groups, size_t row_index_stride,
const std::vector<gdf_column_wrapper> &columns) {
const size_t num_columns = columns.size();
const size_t num_stripes = chunks.size() / columns.size();
const size_t num_rows = columns[0]->size;
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.valid_map_base = reinterpret_cast<uint32_t *>(columns[j]->valid);
chunk.column_data_base = columns[j]->data;
chunk.dtype_len = (columns[j]->dtype == GDF_STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(columns[j]->dtype);
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<orc::gpu::DictionaryEntry> global_dict(num_dicts);
// Allocate timezone transition table timestamp conversion
rmm::device_vector<int64_t> tz_table = timezone_table;
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(DecodeNullsAndStringDictionaries(
chunks.device_ptr(), global_dict.data().get(), num_columns, num_stripes,
num_rows, skip_rows));
CUDA_TRY(DecodeOrcColumnData(
chunks.device_ptr(), global_dict.data().get(), num_columns, num_stripes,
num_rows, skip_rows, tz_table.data().get(), tz_table.size(),
row_groups.data().get(), row_groups.size() / num_columns,
row_index_stride));
CUDA_TRY(hipMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
chunks.memory_size(), hipMemcpyDeviceToHost));
CUDA_TRY(hipStreamSynchronize(0));
LOG_PRINTF("[+] Decoded Column Information\n");
for (size_t i = 0; i < num_columns; ++i) {
for (size_t j = 0; j < num_stripes; ++j) {
columns[i]->null_count += chunks[j * num_columns + i].null_count;
}
LOG_PRINTF(
"columns[%zd].null_count = %d/%d (start_row=%d, nrows=%d, "
"strm_len=%d)\n",
i, columns[i]->null_count, columns[i]->size, chunks[i].start_row,
chunks[i].num_rows, chunks[i].strm_len[orc::gpu::CI_PRESENT]);
}
}
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)) {
  // Open and parse the source ORC dataset metadata
md_ = std::make_unique<OrcMetadata>(source_.get());
// Select only columns required by the options
selected_cols_ = md_->select_columns(options.columns, has_timestamp_column_);
// Override output timestamp resolution if requested
if (options.timestamp_unit != TIME_UNIT_NONE) {
timestamp_unit_ = options.timestamp_unit;
}
// Enable or disable attempt to use row index for parsing
use_index_ = options.use_index;
// Enable or disable the conversion to numpy-compatible dtypes
use_np_dtypes_ = options.use_np_dtypes;
}
table reader::Impl::read(int skip_rows, int num_rows, int stripe) {
// Select only stripes required (aka row groups)
const auto selected_stripes =
md_->select_stripes(stripe, skip_rows, num_rows);
const int num_columns = selected_cols_.size();
// Association between each ORC column and its gdf_column
std::vector<int32_t> orc_col_map(md_->get_num_columns(), -1);
// Initialize gdf_columns, but hold off on allocating storage space
std::vector<gdf_column_wrapper> columns;
LOG_PRINTF("[+] Selected columns: %d\n", num_columns);
for (const auto &col : selected_cols_) {
auto dtype_info =
to_dtype(md_->ff.types[col], use_np_dtypes_, timestamp_unit_);
// Map each ORC column to its gdf_column
orc_col_map[col] = columns.size();
columns.emplace_back(static_cast<cudf::size_type>(selected_stripes.size() > 0 ? num_rows : 0), dtype_info.first,
dtype_info.second, md_->ff.GetColumnName(col));
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
// Logically view streams as columns
std::vector<OrcStreamInfo> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
if (num_rows > 0 && selected_stripes.size() > 0) {
const auto num_column_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<orc::gpu::ColumnDesc> chunks(num_column_chunks);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(use_index_ == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > md_->get_row_index_stride() &&
!(md_->get_row_index_stride() & 7) && md_->get_row_index_stride() > 0 &&
num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(
i, stripe_info, stripe_footer, orc_col_map, selected_cols_,
md_->ff.types, use_index, &num_dict_entries, chunks, stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(align_size(total_data_size));
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = source_->get_buffer(offset, len);
CUDA_TRY(hipMemcpyAsync(d_dst, buffer->data(), len,
hipMemcpyHostToDevice));
CUDA_TRY(hipStreamSynchronize(0));
}
// Update chunks to reference streams pointers
for (int j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[selected_cols_[j]].kind;
chunk.type_kind = md_->ff.types[selected_cols_[j]].kind;
chunk.decimal_scale = md_->ff.types[selected_cols_[j]].scale;
chunk.rowgroup_id = num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(timestamp_unit_);
}
for (int k = 0; k < orc::gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups +=
(stripe_info->numberOfRows + md_->get_row_index_stride() - 1) /
md_->get_row_index_stride();
}
}
// Setup table for converting timestamp columns from local to UTC time
std::vector<int64_t> tz_table;
if (has_timestamp_column_ && !selected_stripes.empty()) {
CUDF_EXPECTS(BuildTimezoneTransitionTable(
tz_table, selected_stripes[0].second->writerTimezone),
"Cannot setup timezone LUT");
}
// Setup row group descriptors if using indexes
rmm::device_vector<orc::gpu::RowGroup> row_groups(num_rowgroups *
num_columns);
if (md_->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(
chunks, stripe_data, md_->decompressor.get(), stream_info,
selected_stripes.size(), row_groups, md_->get_row_index_stride());
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(hipMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), hipMemcpyHostToDevice));
CUDA_TRY(ParseRowGroupIndex(
row_groups.data().get(), nullptr, chunks.device_ptr(), num_columns,
selected_stripes.size(), num_rowgroups, md_->get_row_index_stride()));
}
}
for (auto &column : columns) {
column.allocate();
}
decode_stream_data(chunks, num_dict_entries, skip_rows, tz_table,
row_groups, md_->get_row_index_stride(), columns);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// Columns' data's memory is still expected for an empty dataframe
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
table reader::read_all() { return impl_->read(0, -1, -1); }
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1);
}
table reader::read_stripe(size_t stripe) { return impl_->read(0, -1, stripe); }
reader::~reader() = default;
} // namespace orc
} // namespace io
} // namespace cudf
| 0fa2a634d5312a3654ab146fc46e074853c2a74f.cu | /*
* Copyright (c) 2019, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "../orc.h"
#include "../orc_gpu.h"
#include "orc_reader_impl.hpp"
#include "../timezone.h"
#include <io/comp/gpuinflate.h>
#include <algorithm>
#include <cstring>
#include <iostream>
#include <numeric>
#include <utility>
#include <rmm/device_buffer.hpp>
namespace cudf {
namespace io {
namespace orc {
static_assert(sizeof(orc::gpu::CompressedStreamInfo) <= 256 &&
!(sizeof(orc::gpu::CompressedStreamInfo) & 7),
"Unexpected sizeof(CompressedStreamInfo)");
static_assert(sizeof(orc::gpu::ColumnDesc) <= 256 &&
!(sizeof(orc::gpu::ColumnDesc) & 7),
"Unexpected sizeof(ColumnDesc)");
#if 0
#define LOG_PRINTF(...) std::printf(__VA_ARGS__)
#else
#define LOG_PRINTF(...) (void)0
#endif
/**
* @brief Function that translates cuDF time unit to ORC clock frequency
**/
constexpr int32_t to_clockrate(gdf_time_unit time_unit) {
switch (time_unit) {
case TIME_UNIT_s:
return 1;
case TIME_UNIT_ms:
return 1000;
case TIME_UNIT_us:
return 1000000;
case TIME_UNIT_ns:
return 1000000000;
default:
return 0;
}
}
/**
* @brief Function that translates ORC datatype to GDF dtype
**/
constexpr std::pair<gdf_dtype, gdf_dtype_extra_info> to_dtype(
const orc::SchemaType &schema, bool use_np_dtypes, gdf_time_unit ts_unit) {
switch (schema.kind) {
case orc::BOOLEAN:
return std::make_pair(GDF_BOOL8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::BYTE:
return std::make_pair(GDF_INT8, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::SHORT:
return std::make_pair(GDF_INT16, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::INT:
return std::make_pair(GDF_INT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::LONG:
return std::make_pair(GDF_INT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::FLOAT:
return std::make_pair(GDF_FLOAT32, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::DOUBLE:
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::STRING:
case orc::BINARY:
case orc::VARCHAR:
case orc::CHAR:
// Variable-length types can all be mapped to GDF_STRING
return std::make_pair(GDF_STRING, gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::TIMESTAMP:
return (ts_unit != TIME_UNIT_NONE)
? std::make_pair(GDF_TIMESTAMP, gdf_dtype_extra_info{ts_unit})
: std::make_pair(GDF_TIMESTAMP,
gdf_dtype_extra_info{TIME_UNIT_ns});
case orc::DATE:
// There isn't a (GDF_DATE32 -> np.dtype) mapping
return (use_np_dtypes)
? std::make_pair(GDF_DATE64,
gdf_dtype_extra_info{TIME_UNIT_ms})
: std::make_pair(GDF_DATE32,
gdf_dtype_extra_info{TIME_UNIT_NONE});
case orc::DECIMAL:
// There isn't an arbitrary-precision type in cuDF, so map as float
static_assert(DECIMALS_AS_FLOAT64 == 1, "Missing decimal->float");
return std::make_pair(GDF_FLOAT64, gdf_dtype_extra_info{TIME_UNIT_NONE});
default:
break;
}
return std::make_pair(GDF_invalid, gdf_dtype_extra_info{TIME_UNIT_NONE});
}
constexpr std::pair<orc::gpu::StreamIndexType, uint32_t> get_index_type_and_pos(
const orc::StreamKind kind, uint32_t skip_count, bool non_child) {
switch (kind) {
case orc::DATA:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 8;
return std::make_pair(orc::gpu::CI_DATA, skip_count);
case orc::LENGTH:
case orc::SECONDARY:
skip_count += 1;
skip_count |= (skip_count & 0xff) << 16;
return std::make_pair(orc::gpu::CI_DATA2, skip_count);
case orc::DICTIONARY_DATA:
return std::make_pair(orc::gpu::CI_DICTIONARY, skip_count);
case orc::PRESENT:
skip_count += (non_child ? 1 : 0);
return std::make_pair(orc::gpu::CI_PRESENT, skip_count);
case orc::ROW_INDEX:
return std::make_pair(orc::gpu::CI_INDEX, skip_count);
default:
// Skip this stream as it's not strictly required
return std::make_pair(orc::gpu::CI_NUM_STREAMS, 0);
}
}
/**
* @brief A helper class for ORC file metadata. Provides some additional
* convenience methods for initializing and accessing metadata.
**/
class OrcMetadata {
using OrcStripeInfo =
std::pair<const orc::StripeInformation *, const orc::StripeFooter *>;
public:
explicit OrcMetadata(datasource *const src) : source(src) {
const auto len = source->size();
const auto max_ps_size = std::min(len, static_cast<size_t>(256));
// Read uncompressed postscript section (max 255 bytes + 1 byte for length)
auto buffer = source->get_buffer(len - max_ps_size, max_ps_size);
const size_t ps_length = buffer->data()[max_ps_size - 1];
const uint8_t *ps_data = &buffer->data()[max_ps_size - ps_length - 1];
orc::ProtobufReader pb;
pb.init(ps_data, ps_length);
CUDF_EXPECTS(pb.read(&ps, ps_length), "Cannot read postscript");
CUDF_EXPECTS(ps.footerLength + ps_length < len, "Invalid footer length");
print_postscript(ps_length);
// If compression is used, all the rest of the metadata is compressed
// If no compressed is used, the decompressor is simply a pass-through
decompressor = std::make_unique<orc::OrcDecompressor>(
ps.compression, ps.compressionBlockSize);
// Read compressed filefooter section
buffer = source->get_buffer(len - ps_length - 1 - ps.footerLength,
ps.footerLength);
size_t ff_length = 0;
auto ff_data = decompressor->Decompress(buffer->data(), ps.footerLength, &ff_length);
pb.init(ff_data, ff_length);
CUDF_EXPECTS(pb.read(&ff, ff_length), "Cannot read filefooter");
CUDF_EXPECTS(get_num_columns() > 0, "No columns found");
print_filefooter();
}
/**
* @brief Filters and reads the info of only a selection of stripes
*
* @param[in] stripe Index of the stripe to select
* @param[in] row_start Starting row of the selection
* @param[in,out] row_count Total number of rows selected
*
* @return List of stripe info and total number of selected rows
**/
auto select_stripes(int stripe, int &row_start, int &row_count) {
std::vector<OrcStripeInfo> selection;
if (stripe != -1) {
CUDF_EXPECTS(stripe < get_num_stripes(), "Non-existent stripe");
selection.emplace_back(&ff.stripes[stripe], nullptr);
if (row_count < 0) {
row_count = ff.stripes[stripe].numberOfRows;
} else {
row_count = std::min(row_count, (int)ff.stripes[stripe].numberOfRows);
}
} else {
row_start = std::max(row_start, 0);
if (row_count == -1) {
row_count = get_total_rows();
}
CUDF_EXPECTS(row_count >= 0, "Invalid row count");
CUDF_EXPECTS(row_start <= get_total_rows(), "Invalid row start");
int stripe_skip_rows = 0;
for (int i = 0, count = 0; i < (int)ff.stripes.size(); ++i) {
count += ff.stripes[i].numberOfRows;
if (count > row_start) {
if (selection.size() == 0) {
stripe_skip_rows = row_start - (count - ff.stripes[i].numberOfRows);
}
selection.emplace_back(&ff.stripes[i], nullptr);
}
if (count >= (row_start + row_count)) {
break;
}
}
row_start = stripe_skip_rows;
}
// Read each stripe's stripefooter metadata
if (not selection.empty()) {
orc::ProtobufReader pb;
stripefooters.resize(selection.size());
for (size_t i = 0; i < selection.size(); ++i) {
const auto stripe = selection[i].first;
const auto sf_comp_offset =
stripe->offset + stripe->indexLength + stripe->dataLength;
const auto sf_comp_length = stripe->footerLength;
CUDF_EXPECTS(sf_comp_offset + sf_comp_length < source->size(),
"Invalid stripe information");
const auto buffer = source->get_buffer(sf_comp_offset, sf_comp_length);
size_t sf_length = 0;
auto sf_data = decompressor->Decompress(buffer->data(), sf_comp_length,
&sf_length);
pb.init(sf_data, sf_length);
CUDF_EXPECTS(pb.read(&stripefooters[i], sf_length),
"Cannot read stripefooter");
selection[i].second = &stripefooters[i];
}
}
return selection;
}
/**
* @brief Filters and reduces down to a selection of columns
*
* @param[in] use_names List of column names to select
* @param[out] has_timestamp_column Whether there is a orc::TIMESTAMP column
*
* @return List of ORC column indexes
**/
auto select_columns(std::vector<std::string> use_names,
bool &has_timestamp_column) {
std::vector<int> selection;
if (not use_names.empty()) {
int index = 0;
for (const auto &use_name : use_names) {
for (int i = 0; i < get_num_columns(); ++i, ++index) {
if (index >= get_num_columns()) {
index = 0;
}
if (ff.GetColumnName(index) == use_name) {
selection.emplace_back(index);
index++;
if (ff.types[i].kind == orc::TIMESTAMP) {
has_timestamp_column = true;
}
break;
}
}
}
} else {
// For now, only select all leaf nodes
for (int i = 0; i < get_num_columns(); ++i) {
if (ff.types[i].subtypes.size() == 0) {
selection.emplace_back(i);
if (ff.types[i].kind == orc::TIMESTAMP) {
has_timestamp_column = true;
}
}
}
}
CUDF_EXPECTS(selection.size() > 0, "Filtered out all columns");
return selection;
}
inline int get_total_rows() const { return ff.numberOfRows; }
inline int get_num_stripes() const { return ff.stripes.size(); }
inline int get_num_columns() const { return ff.types.size(); }
inline int get_row_index_stride() const { return ff.rowIndexStride; }
private:
void print_postscript(size_t ps_length) const {
LOG_PRINTF("\n[+] PostScript:\n");
LOG_PRINTF(" postscriptLength = %zd\n", ps_length);
LOG_PRINTF(" footerLength = %zd\n", (size_t)ps.footerLength);
LOG_PRINTF(" compression = %d\n", ps.compression);
LOG_PRINTF(" compressionBlockSize = %d\n", ps.compressionBlockSize);
LOG_PRINTF(" version(%zd) = {%d,%d}\n", ps.version.size(),
(ps.version.size() > 0) ? (int32_t)ps.version[0] : -1,
(ps.version.size() > 1) ? (int32_t)ps.version[1] : -1);
LOG_PRINTF(" metadataLength = %zd\n", (size_t)ps.metadataLength);
LOG_PRINTF(" magic = \"%s\"\n", ps.magic.c_str());
}
void print_filefooter() const {
LOG_PRINTF("\n[+] FileFooter:\n");
LOG_PRINTF(" headerLength = %zd\n", ff.headerLength);
LOG_PRINTF(" contentLength = %zd\n", ff.contentLength);
LOG_PRINTF(" stripes (%zd entries):\n", ff.stripes.size());
for (size_t i = 0; i < ff.stripes.size(); i++) {
LOG_PRINTF(
" [%zd] @ %zd: %d rows, index+data+footer: %zd+%zd+%d bytes\n", i,
ff.stripes[i].offset, ff.stripes[i].numberOfRows,
ff.stripes[i].indexLength, ff.stripes[i].dataLength,
ff.stripes[i].footerLength);
}
LOG_PRINTF(" types (%zd entries):\n", ff.types.size());
for (size_t i = 0; i < ff.types.size(); i++) {
LOG_PRINTF(" column [%zd]: kind = %d, parent = %d\n", i,
ff.types[i].kind, ff.types[i].parent_idx);
if (ff.types[i].subtypes.size() > 0) {
LOG_PRINTF(" subtypes = ");
for (size_t j = 0; j < ff.types[i].subtypes.size(); j++) {
LOG_PRINTF("%c%d", (j) ? ',' : '{', ff.types[i].subtypes[j]);
}
LOG_PRINTF("}\n");
}
if (ff.types[i].fieldNames.size() > 0) {
LOG_PRINTF(" fieldNames = ");
for (size_t j = 0; j < ff.types[i].fieldNames.size(); j++) {
LOG_PRINTF("%c\"%s\"", (j) ? ',' : '{',
ff.types[i].fieldNames[j].c_str());
}
LOG_PRINTF("}\n");
}
}
if (ff.metadata.size() > 0) {
LOG_PRINTF(" metadata (%zd entries):\n", ff.metadata.size());
for (size_t i = 0; i < ff.metadata.size(); i++) {
LOG_PRINTF(" [%zd] \"%s\" = \"%s\"\n", i, ff.metadata[i].name.c_str(),
ff.metadata[i].value.c_str());
}
}
LOG_PRINTF(" numberOfRows = %zd\n", ff.numberOfRows);
LOG_PRINTF(" rowIndexStride = %d\n", ff.rowIndexStride);
}
public:
orc::PostScript ps;
orc::FileFooter ff;
std::vector<orc::StripeFooter> stripefooters;
std::unique_ptr<orc::OrcDecompressor> decompressor;
private:
datasource *const source;
};
/**
* @brief Struct that maps ORC streams to columns
**/
struct OrcStreamInfo {
OrcStreamInfo() = default;
explicit OrcStreamInfo(uint64_t offset_, size_t dst_pos_, uint32_t length_,
uint32_t gdf_idx_, uint32_t stripe_idx_)
: offset(offset_),
dst_pos(dst_pos_),
length(length_),
gdf_idx(gdf_idx_),
stripe_idx(stripe_idx_) {}
uint64_t offset; // offset in file
size_t dst_pos; // offset in memory relative to the beginning of the compressed stripe data
uint32_t length; // length in file
uint32_t gdf_idx; // gdf column index
uint32_t stripe_idx; // stripe index
};
size_t reader::Impl::gather_stream_info(
const size_t stripe_index, const orc::StripeInformation *stripeinfo,
const orc::StripeFooter *stripefooter, const std::vector<int> &orc2gdf,
const std::vector<int> &gdf2orc, const std::vector<orc::SchemaType> types,
bool use_index,
size_t *num_dictionary_entries,
hostdevice_vector<orc::gpu::ColumnDesc> &chunks,
std::vector<OrcStreamInfo> &stream_info) {
const auto num_columns = gdf2orc.size();
uint64_t src_offset = 0;
uint64_t dst_offset = 0;
for (const auto &stream : stripefooter->streams) {
if (stream.column >= orc2gdf.size()) {
dst_offset += stream.length;
continue;
}
auto col = orc2gdf[stream.column];
if (col == -1) {
// A struct-type column has no data itself, but rather child columns
// for each of its fields. There is only a PRESENT stream, which
// needs to be included for the reader.
const auto schema_type = types[stream.column];
if (schema_type.subtypes.size() != 0) {
if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) {
for (const auto &idx : schema_type.subtypes) {
auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1;
if (child_idx >= 0) {
col = child_idx;
auto &chunk = chunks[stripe_index * num_columns + col];
chunk.strm_id[orc::gpu::CI_PRESENT] = stream_info.size();
chunk.strm_len[orc::gpu::CI_PRESENT] = stream.length;
}
}
}
}
}
if (col != -1) {
if (src_offset >= stripeinfo->indexLength || use_index) {
// NOTE: skip_count field is temporarily used to track index ordering
auto &chunk = chunks[stripe_index * num_columns + col];
const auto idx = get_index_type_and_pos(stream.kind, chunk.skip_count,
col == orc2gdf[stream.column]);
if (idx.first < orc::gpu::CI_NUM_STREAMS) {
chunk.strm_id[idx.first] = stream_info.size();
chunk.strm_len[idx.first] = stream.length;
chunk.skip_count = idx.second;
if (idx.first == orc::gpu::CI_DICTIONARY) {
chunk.dictionary_start = *num_dictionary_entries;
chunk.dict_len =
stripefooter->columns[stream.column].dictionarySize;
*num_dictionary_entries +=
stripefooter->columns[stream.column].dictionarySize;
}
}
}
stream_info.emplace_back(stripeinfo->offset + src_offset, dst_offset,
stream.length, col, stripe_index);
dst_offset += stream.length;
}
src_offset += stream.length;
}
return dst_offset;
}
rmm::device_buffer reader::Impl::decompress_stripe_data(
const hostdevice_vector<orc::gpu::ColumnDesc> &chunks,
const std::vector<rmm::device_buffer> &stripe_data,
const orc::OrcDecompressor *decompressor,
std::vector<OrcStreamInfo> &stream_info, size_t num_stripes,
rmm::device_vector<orc::gpu::RowGroup> &row_groups,
size_t row_index_stride) {
// Parse the columns' compressed info
hostdevice_vector<orc::gpu::CompressedStreamInfo> compinfo(0, stream_info.size());
for (const auto &info : stream_info) {
compinfo.insert(orc::gpu::CompressedStreamInfo(
static_cast<const uint8_t *>(stripe_data[info.stripe_idx].data()) +
info.dst_pos,
info.length));
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(), compinfo.host_ptr(),
compinfo.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(ParseCompressedStripeData(
compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio()));
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(), compinfo.device_ptr(),
compinfo.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
// Count the exact number of compressed blocks
size_t num_compressed_blocks = 0;
size_t num_uncompressed_blocks = 0;
size_t total_decompressed_size = 0;
for (size_t i = 0; i < compinfo.size(); ++i) {
num_compressed_blocks += compinfo[i].num_compressed_blocks;
num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks;
total_decompressed_size += compinfo[i].max_uncompressed_size;
}
CUDF_EXPECTS(total_decompressed_size > 0, "No decompressible data found");
LOG_PRINTF(
"[+] Compression\n Total compressed size: %zd\n Number of "
"compressed blocks: %zd\n Codec: %d\n",
total_decompressed_size, num_compressed_blocks, decompressor->GetKind());
rmm::device_buffer decomp_data(align_size(total_decompressed_size));
rmm::device_vector<gpu_inflate_input_s> inflate_in(num_compressed_blocks + num_uncompressed_blocks);
rmm::device_vector<gpu_inflate_status_s> inflate_out(num_compressed_blocks);
// Parse again to populate the decompression input/output buffers
size_t decomp_offset = 0;
uint32_t start_pos = 0;
uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks;
for (size_t i = 0; i < compinfo.size(); ++i) {
auto dst_base = static_cast<uint8_t *>(decomp_data.data());
compinfo[i].uncompressed_data = dst_base + decomp_offset;
compinfo[i].decctl = inflate_in.data().get() + start_pos;
compinfo[i].decstatus = inflate_out.data().get() + start_pos;
compinfo[i].copyctl = inflate_in.data().get() + start_pos_uncomp;
stream_info[i].dst_pos = decomp_offset;
decomp_offset += compinfo[i].max_uncompressed_size;
start_pos += compinfo[i].num_compressed_blocks;
start_pos_uncomp += compinfo[i].num_uncompressed_blocks;
}
CUDA_TRY(cudaMemcpyAsync(compinfo.device_ptr(), compinfo.host_ptr(),
compinfo.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(ParseCompressedStripeData(
compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(),
decompressor->GetLog2MaxCompressionRatio()));
// Dispatch batches of blocks to decompress
if (num_compressed_blocks > 0) {
switch (decompressor->GetKind()) {
case orc::ZLIB:
CUDA_TRY(gpuinflate(inflate_in.data().get(), inflate_out.data().get(),
num_compressed_blocks, 0));
break;
case orc::SNAPPY:
CUDA_TRY(gpu_unsnap(inflate_in.data().get(), inflate_out.data().get(),
num_compressed_blocks));
break;
default:
CUDF_EXPECTS(false, "Unexpected decompression dispatch");
break;
}
}
if (num_uncompressed_blocks > 0) {
CUDA_TRY(gpu_copy_uncompressed_blocks(inflate_in.data().get() + num_compressed_blocks,
num_uncompressed_blocks));
}
CUDA_TRY(PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size()));
// Update the stream information with the updated uncompressed info
// TBD: We could update the value from the information we already
// have in stream_info[], but using the gpu results also updates
// max_uncompressed_size to the actual uncompressed size, or zero if
// decompression failed.
CUDA_TRY(cudaMemcpyAsync(compinfo.host_ptr(), compinfo.device_ptr(),
compinfo.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
const size_t num_columns = chunks.size() / num_stripes;
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
for (int k = 0; k < orc::gpu::CI_NUM_STREAMS; ++k) {
if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) {
chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data;
chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size;
}
}
}
}
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(ParseRowGroupIndex(row_groups.data().get(), compinfo.device_ptr(),
chunks.device_ptr(), num_columns, num_stripes,
row_groups.size() / num_columns,
row_index_stride));
}
return decomp_data;
}
void reader::Impl::decode_stream_data(
const hostdevice_vector<orc::gpu::ColumnDesc> &chunks, size_t num_dicts,
size_t skip_rows, const std::vector<int64_t> &timezone_table,
rmm::device_vector<orc::gpu::RowGroup> &row_groups, size_t row_index_stride,
const std::vector<gdf_column_wrapper> &columns) {
const size_t num_columns = columns.size();
const size_t num_stripes = chunks.size() / columns.size();
const size_t num_rows = columns[0]->size;
// Update chunks with pointers to column data
for (size_t i = 0; i < num_stripes; ++i) {
for (size_t j = 0; j < num_columns; ++j) {
auto &chunk = chunks[i * num_columns + j];
chunk.valid_map_base = reinterpret_cast<uint32_t *>(columns[j]->valid);
chunk.column_data_base = columns[j]->data;
chunk.dtype_len = (columns[j]->dtype == GDF_STRING)
? sizeof(std::pair<const char *, size_t>)
: cudf::size_of(columns[j]->dtype);
}
}
// Allocate global dictionary for deserializing
rmm::device_vector<orc::gpu::DictionaryEntry> global_dict(num_dicts);
// Allocate timezone transition table timestamp conversion
rmm::device_vector<int64_t> tz_table = timezone_table;
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(DecodeNullsAndStringDictionaries(
chunks.device_ptr(), global_dict.data().get(), num_columns, num_stripes,
num_rows, skip_rows));
CUDA_TRY(DecodeOrcColumnData(
chunks.device_ptr(), global_dict.data().get(), num_columns, num_stripes,
num_rows, skip_rows, tz_table.data().get(), tz_table.size(),
row_groups.data().get(), row_groups.size() / num_columns,
row_index_stride));
CUDA_TRY(cudaMemcpyAsync(chunks.host_ptr(), chunks.device_ptr(),
chunks.memory_size(), cudaMemcpyDeviceToHost));
CUDA_TRY(cudaStreamSynchronize(0));
LOG_PRINTF("[+] Decoded Column Information\n");
for (size_t i = 0; i < num_columns; ++i) {
for (size_t j = 0; j < num_stripes; ++j) {
columns[i]->null_count += chunks[j * num_columns + i].null_count;
}
LOG_PRINTF(
"columns[%zd].null_count = %d/%d (start_row=%d, nrows=%d, "
"strm_len=%d)\n",
i, columns[i]->null_count, columns[i]->size, chunks[i].start_row,
chunks[i].num_rows, chunks[i].strm_len[orc::gpu::CI_PRESENT]);
}
}
reader::Impl::Impl(std::unique_ptr<datasource> source,
reader_options const &options)
: source_(std::move(source)) {
  // Open and parse the source ORC dataset metadata
md_ = std::make_unique<OrcMetadata>(source_.get());
// Select only columns required by the options
selected_cols_ = md_->select_columns(options.columns, has_timestamp_column_);
// Override output timestamp resolution if requested
if (options.timestamp_unit != TIME_UNIT_NONE) {
timestamp_unit_ = options.timestamp_unit;
}
// Enable or disable attempt to use row index for parsing
use_index_ = options.use_index;
// Enable or disable the conversion to numpy-compatible dtypes
use_np_dtypes_ = options.use_np_dtypes;
}
table reader::Impl::read(int skip_rows, int num_rows, int stripe) {
// Select only stripes required (aka row groups)
const auto selected_stripes =
md_->select_stripes(stripe, skip_rows, num_rows);
const int num_columns = selected_cols_.size();
// Association between each ORC column and its gdf_column
std::vector<int32_t> orc_col_map(md_->get_num_columns(), -1);
// Initialize gdf_columns, but hold off on allocating storage space
std::vector<gdf_column_wrapper> columns;
LOG_PRINTF("[+] Selected columns: %d\n", num_columns);
for (const auto &col : selected_cols_) {
auto dtype_info =
to_dtype(md_->ff.types[col], use_np_dtypes_, timestamp_unit_);
// Map each ORC column to its gdf_column
orc_col_map[col] = columns.size();
columns.emplace_back(static_cast<cudf::size_type>(selected_stripes.size() > 0 ? num_rows : 0), dtype_info.first,
dtype_info.second, md_->ff.GetColumnName(col));
LOG_PRINTF(" %2zd: name=%s size=%zd type=%d data=%lx valid=%lx\n",
columns.size() - 1, columns.back()->col_name,
(size_t)columns.back()->size, columns.back()->dtype,
(uint64_t)columns.back()->data, (uint64_t)columns.back()->valid);
}
// Logically view streams as columns
std::vector<OrcStreamInfo> stream_info;
// Tracker for eventually deallocating compressed and uncompressed data
std::vector<rmm::device_buffer> stripe_data;
if (num_rows > 0 && selected_stripes.size() > 0) {
const auto num_column_chunks = selected_stripes.size() * num_columns;
hostdevice_vector<orc::gpu::ColumnDesc> chunks(num_column_chunks);
memset(chunks.host_ptr(), 0, chunks.memory_size());
const bool use_index =
(use_index_ == true) &&
// Only use if we don't have much work with complete columns & stripes
// TODO: Consider nrows, gpu, and tune the threshold
(num_rows > md_->get_row_index_stride() &&
!(md_->get_row_index_stride() & 7) && md_->get_row_index_stride() > 0 &&
num_columns * selected_stripes.size() < 8 * 128) &&
// Only use if first row is aligned to a stripe boundary
// TODO: Fix logic to handle unaligned rows
(skip_rows == 0);
size_t stripe_start_row = 0;
size_t num_dict_entries = 0;
size_t num_rowgroups = 0;
for (size_t i = 0; i < selected_stripes.size(); ++i) {
const auto stripe_info = selected_stripes[i].first;
const auto stripe_footer = selected_stripes[i].second;
auto stream_count = stream_info.size();
const auto total_data_size = gather_stream_info(
i, stripe_info, stripe_footer, orc_col_map, selected_cols_,
md_->ff.types, use_index, &num_dict_entries, chunks, stream_info);
CUDF_EXPECTS(total_data_size > 0, "Expected streams data within stripe");
stripe_data.emplace_back(align_size(total_data_size));
auto dst_base = static_cast<uint8_t *>(stripe_data.back().data());
// Coalesce consecutive streams into one read
while (stream_count < stream_info.size()) {
const auto d_dst = dst_base + stream_info[stream_count].dst_pos;
const auto offset = stream_info[stream_count].offset;
auto len = stream_info[stream_count].length;
stream_count++;
while (stream_count < stream_info.size() &&
stream_info[stream_count].offset == offset + len) {
len += stream_info[stream_count].length;
stream_count++;
}
const auto buffer = source_->get_buffer(offset, len);
CUDA_TRY(cudaMemcpyAsync(d_dst, buffer->data(), len,
cudaMemcpyHostToDevice));
CUDA_TRY(cudaStreamSynchronize(0));
}
// Update chunks to reference streams pointers
for (int j = 0; j < num_columns; j++) {
auto &chunk = chunks[i * num_columns + j];
chunk.start_row = stripe_start_row;
chunk.num_rows = stripe_info->numberOfRows;
chunk.encoding_kind = stripe_footer->columns[selected_cols_[j]].kind;
chunk.type_kind = md_->ff.types[selected_cols_[j]].kind;
chunk.decimal_scale = md_->ff.types[selected_cols_[j]].scale;
chunk.rowgroup_id = num_rowgroups;
if (chunk.type_kind == orc::TIMESTAMP) {
chunk.ts_clock_rate = to_clockrate(timestamp_unit_);
}
for (int k = 0; k < orc::gpu::CI_NUM_STREAMS; k++) {
if (chunk.strm_len[k] > 0) {
chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos;
}
}
}
stripe_start_row += stripe_info->numberOfRows;
if (use_index) {
num_rowgroups +=
(stripe_info->numberOfRows + md_->get_row_index_stride() - 1) /
md_->get_row_index_stride();
}
}
// Setup table for converting timestamp columns from local to UTC time
std::vector<int64_t> tz_table;
if (has_timestamp_column_ && !selected_stripes.empty()) {
CUDF_EXPECTS(BuildTimezoneTransitionTable(
tz_table, selected_stripes[0].second->writerTimezone),
"Cannot setup timezone LUT");
}
// Setup row group descriptors if using indexes
rmm::device_vector<orc::gpu::RowGroup> row_groups(num_rowgroups *
num_columns);
if (md_->ps.compression != orc::NONE) {
auto decomp_data = decompress_stripe_data(
chunks, stripe_data, md_->decompressor.get(), stream_info,
selected_stripes.size(), row_groups, md_->get_row_index_stride());
stripe_data.clear();
stripe_data.push_back(std::move(decomp_data));
} else {
if (not row_groups.empty()) {
CUDA_TRY(cudaMemcpyAsync(chunks.device_ptr(), chunks.host_ptr(),
chunks.memory_size(), cudaMemcpyHostToDevice));
CUDA_TRY(ParseRowGroupIndex(
row_groups.data().get(), nullptr, chunks.device_ptr(), num_columns,
selected_stripes.size(), num_rowgroups, md_->get_row_index_stride()));
}
}
for (auto &column : columns) {
column.allocate();
}
decode_stream_data(chunks, num_dict_entries, skip_rows, tz_table,
row_groups, md_->get_row_index_stride(), columns);
// Perform any final column preparation (may reference decoded data)
for (auto &column : columns) {
column.finalize();
}
} else {
// Columns' data's memory is still expected for an empty dataframe
for (auto &column : columns) {
column.allocate();
column.finalize();
}
}
// Transfer ownership to raw pointer output arguments
std::vector<gdf_column *> out_cols(columns.size());
for (size_t i = 0; i < columns.size(); ++i) {
out_cols[i] = columns[i].release();
}
return cudf::table(out_cols.data(), out_cols.size());
}
reader::reader(std::string filepath, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(filepath), options)) {}
reader::reader(const char *buffer, size_t length, reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(buffer, length),
options)) {}
reader::reader(std::shared_ptr<arrow::io::RandomAccessFile> file,
reader_options const &options)
: impl_(std::make_unique<Impl>(datasource::create(file), options)) {}
table reader::read_all() { return impl_->read(0, -1, -1); }
table reader::read_rows(size_t skip_rows, size_t num_rows) {
return impl_->read(skip_rows, (num_rows != 0) ? (int)num_rows : -1, -1);
}
table reader::read_stripe(size_t stripe) { return impl_->read(0, -1, stripe); }
reader::~reader() = default;
} // namespace orc
} // namespace io
} // namespace cudf
|
226b2eb7f7ee08d81ea145c56358d73b7dce3d48.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "mergeHistogram256Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint *d_Histogram = NULL;
hipMalloc(&d_Histogram, XSIZE*YSIZE);
uint *d_PartialHistograms = NULL;
hipMalloc(&d_PartialHistograms, XSIZE*YSIZE);
uint histogramCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
mergeHistogram256Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Histogram,d_PartialHistograms,histogramCount);
hipDeviceSynchronize();
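// Warm-up: run the kernel 10 more times before the timed loop of 1000 launches below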
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
mergeHistogram256Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Histogram,d_PartialHistograms,histogramCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
mergeHistogram256Kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, d_Histogram,d_PartialHistograms,histogramCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 226b2eb7f7ee08d81ea145c56358d73b7dce3d48.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "mergeHistogram256Kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
uint *d_Histogram = NULL;
cudaMalloc(&d_Histogram, XSIZE*YSIZE);
uint *d_PartialHistograms = NULL;
cudaMalloc(&d_PartialHistograms, XSIZE*YSIZE);
uint histogramCount = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
mergeHistogram256Kernel<<<gridBlock,threadBlock>>>(d_Histogram,d_PartialHistograms,histogramCount);
cudaDeviceSynchronize();
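// Warm-up: run the kernel 10 more times before the timed loop of 1000 launches below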
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
mergeHistogram256Kernel<<<gridBlock,threadBlock>>>(d_Histogram,d_PartialHistograms,histogramCount);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
mergeHistogram256Kernel<<<gridBlock,threadBlock>>>(d_Histogram,d_PartialHistograms,histogramCount);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
fa488675d68d0ba0ddcf695ac05828723e33f5b3.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "freshman.h"
#define N 300000
__global__ void kernel_1()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_2()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_3()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_4()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
int main()
{
int n_stream=5;
hipStream_t *stream=(hipStream_t*)malloc(n_stream*sizeof(hipStream_t));
for(int i=0;i<n_stream;i++)
{
hipStreamCreate(&stream[i]);
}
dim3 block(1);
dim3 grid(1);
hipEvent_t start,stop;
hipEvent_t * event=(hipEvent_t *)malloc(n_stream*sizeof(hipEvent_t));
for(int i=0;i<n_stream;i++)
{
hipEventCreateWithFlags(&event[i],hipEventDisableTiming);
}
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start);
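    //Each stream launches kernel_1..kernel_4 and records an event on itself;
    //hipStreamWaitEvent then makes the last stream (stream[n_stream-1]) wait on
    //each of those events, creating a dependency on all of the other streams.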
for(int i=0;i<n_stream;i++)
{
hipLaunchKernelGGL(( kernel_1), dim3(grid),dim3(block),0,stream[i], );
hipLaunchKernelGGL(( kernel_2), dim3(grid),dim3(block),0,stream[i], );
hipLaunchKernelGGL(( kernel_3), dim3(grid),dim3(block),0,stream[i], );
hipLaunchKernelGGL(( kernel_4), dim3(grid),dim3(block),0,stream[i], );
hipEventRecord(event[i],stream[i]);
hipStreamWaitEvent(stream[n_stream-1],event[i],0);
}
hipEventRecord(stop);
CHECK(hipEventSynchronize(stop));
float elapsed_time;
hipEventElapsedTime(&elapsed_time,start,stop);
for(int i=0;i<n_stream;i++)
{
hipStreamDestroy(stream[i]);
}
hipEventDestroy(start);
hipEventDestroy(stop);
for(int i=0;i<n_stream;i++)
{
hipEventDestroy(event[i]);
}
free(stream);
free(event);
return 0;
}
| fa488675d68d0ba0ddcf695ac05828723e33f5b3.cu | #include <cuda_runtime.h>
#include <stdio.h>
#include "freshman.h"
#define N 300000
__global__ void kernel_1()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_2()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_3()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
__global__ void kernel_4()
{
double sum=0.0;
for(int i=0;i<N;i++)
sum=sum+tan(0.1)*tan(0.1);
}
int main()
{
int n_stream=5;
cudaStream_t *stream=(cudaStream_t*)malloc(n_stream*sizeof(cudaStream_t));
for(int i=0;i<n_stream;i++)
{
cudaStreamCreate(&stream[i]);
}
dim3 block(1);
dim3 grid(1);
cudaEvent_t start,stop;
cudaEvent_t * event=(cudaEvent_t *)malloc(n_stream*sizeof(cudaEvent_t));
for(int i=0;i<n_stream;i++)
{
cudaEventCreateWithFlags(&event[i],cudaEventDisableTiming);
}
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start);
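    //Each stream launches kernel_1..kernel_4 and records an event on itself;
    //cudaStreamWaitEvent then makes the last stream (stream[n_stream-1]) wait on
    //each of those events, creating a dependency on all of the other streams.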
for(int i=0;i<n_stream;i++)
{
kernel_1<<<grid,block,0,stream[i]>>>();
kernel_2<<<grid,block,0,stream[i]>>>();
kernel_3<<<grid,block,0,stream[i]>>>();
kernel_4<<<grid,block,0,stream[i]>>>();
cudaEventRecord(event[i],stream[i]);
cudaStreamWaitEvent(stream[n_stream-1],event[i],0);
}
cudaEventRecord(stop);
CHECK(cudaEventSynchronize(stop));
float elapsed_time;
cudaEventElapsedTime(&elapsed_time,start,stop);
for(int i=0;i<n_stream;i++)
{
cudaStreamDestroy(stream[i]);
}
cudaEventDestroy(start);
cudaEventDestroy(stop);
for(int i=0;i<n_stream;i++)
{
cudaEventDestroy(event[i]);
}
free(stream);
free(event);
return 0;
}
|
b70920f37d81ddf5a85c9e79d243fc2accdf73e9.hip | // !!! This is a file automatically generated by hipify!!!
///This program computes the parallelized version of the FFT_DIF_DIT_TD algorithm
///(29/12/2016)
///This version is used to plot the execution times in MATLAB, considering N = (2^5 x 3^4 x 5^4), Li = 45 and a varying Lo
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hipfft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_complex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////FUNCTION DECLARATIONS//////////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////GLOBAL VARIABLE DECLARATIONS//////////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
hipfftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////INPUT DATA///////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Number of elements of the input vector
/// Li >>> Number of non-zero input elements
/// Lo >>> Number of required output elements
/// loop >>> Number of iterations
/// muestras >>> Number of samples
//////////////////////////////////////////////////////////////////////////
///////////////////////////OUTPUT DATA/////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Output vector
//////////////////////////////////////////////////////////////////////////
/////////////////// THE INPUT DATA ARE ENTERED HERE ///////////////////////
//////////////////////////////////////////////////////////////////////////
///Enter the required number of iterations
const int loop = 300;
///Enter the values of n_max, m_max and l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Enter the value of Li_max
const int Li_max = 45;
//////////////////////////////////////////////////////////////////////////
//////////////////////////MAIN FUNCTION////////////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Main function
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////DEVICE SELECTION/////////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
hipSetDevice(1);
hipGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_NCompuesta_Li45_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_NCompuesta_Li45_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[13];
int cont_1,n_1,m_1,l_1,m_ant,l_ant;
cont_1 = 0;
m_ant = 0;
l_ant = 0;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = 1;i_N <= 1;i_N++)
{
N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(n_1 = 1;n_1 <= n_max;n_1++)
{
for(m_1 = m_ant; m_1 <= n_1;m_1++)
{
m_ant = m_1;
for(l_1 = l_ant;l_1 <= m_1;l_1++)
{
l_ant = l_1;
if((m_1 <= m_max) && (l_1 <= l_max))
{
Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
cont_1++;
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
hipEvent_t start_app, stop_app;
hipEventCreate(&start_app);
hipEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li,N);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
hipEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Host-side computation of the factor P
P = N/(Dip*Dop);
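//N is decomposed as N = Dip*Dop*P: Dip and Dop size the input and output stages, while P is
//the length of each of the Dip*Dop batched FFTs computed in the intermediate stage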
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Host helper function that runs the input stage
etapa_entrada();
//Host helper function that runs the intermediate stage
etapa_intermedia();
//Host helper function that runs the output stage
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
hipEventRecord(stop_app,0);
hipEventSynchronize(stop_app);
hipEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
hipEventDestroy(start_app);
hipEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
hipFree(x_device);
hipFree(W_device);
hipFree(y_device);
hipFree(z_device);
hipFree(X_device);
}
promedio[cont_1-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
}
}
}
fwrite(promedio,sizeof(float),13,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < cont_1;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//This function generates the input vector x[n]
void vector_entrada_xn(int Li, int N)
{
//Local variable declarations
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//This function generates the array W
void arreglo_W(int N)
{
//Local variable declarations
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
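//W[n-1] = cos(2*pi*n/N) - j*sin(2*pi*n/N), i.e. the twiddle factors exp(-j*2*pi*n/N) for n = 1..N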
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//This function generates the factors Dip and Dop
void asign_rap(int N,int Li,int Lo)
{
//Local variable declarations
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Find the factors of "N"
//vF stores the factors of "N"
//svF stores the number of factors of "N"
factor(N);
//printf("\n ERROR \n");
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
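//Keep the factor pair (Dip,Dop) whose Euclidean distance to the ideal pair (N/Li, N/Lo) is smallest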
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//This function finds the factors of "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//This function finds all possible combinations of factors whose product is "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Host helper function that computes the input stage on the device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,n1,n2;
//Device memory allocation for the array "x_device"
hipMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
hipMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Device memory allocation for the array "y"
hipMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
hipMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Copy of the array W to the device's global memory
hipMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),hipMemcpyHostToDevice);
//Host memory allocation for "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Grid sizing for the kernel function "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
hipLaunchKernelGGL(( inputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
hipMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Kernel function that executes the input stage on the device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generation of the elements that depend on x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Host helper function that computes the intermediate stage on the device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Device memory allocation for "z"
hipMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Host memory allocation for "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Device memory allocation for "in" and "out"
hipMalloc((void**)&in,sizeof(hipfftComplex)*P*Dip*Dop);
hipMalloc((void**)&out,sizeof(hipfftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
hipMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se crea un plan
hipfftHandle plan;
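//The plan batches Dip*Dop independent 1-D complex-to-complex transforms of length P, stored
//contiguously (stride 1) with a distance of P elements between consecutive transforms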
hipfftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,HIPFFT_C2C,Dip*Dop);
//Plan execution
hipfftExecC2C(plan,in,out,HIPFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
hipMemcpy(z_device,out,sizeof(hipfftComplex)*P*Dip*Dop,hipMemcpyDeviceToDevice);
//Se destruye el plan
hipfftDestroy(plan);
//Se liberan los arreglos "in" y "out"
hipFree(in);
hipFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
hipMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,hipMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Host helper function that computes the output stage on the device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Local variable declarations
int m;
//Device memory allocation for "X"
hipMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Host memory allocation for "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Grid sizing for the kernel function "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
hipLaunchKernelGGL(( outputStage_kernel), dim3(gridDim),dim3(blockDim), 0, 0, N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
hipDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
hipMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,hipMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//Kernel function that executes the output stage on the device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Local variable declarations
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Computation of X(k) for 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Computation of X(k) for 0<=k<=Dip-1.
//In the decomposition k = k1 + Dip*k2; k2 = 0, and therefore k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Using the direct method
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Using the 2BF filtering method
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
} | b70920f37d81ddf5a85c9e79d243fc2accdf73e9.cu | ///Ésta programa calcula la versión paralelizada del algoritmo FFT_DIF_DIT_TD
///(29/12/2016)
///Ésta versión sirve para graficar en matlab los tiempos de ejecución, considerando N = (2^5 x 3^4 x 5^4), Li = 45 y Lo = varía
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cufft.h>
#include <cufftw.h>
#include <stdio.h>
#include <stdlib.h>
#include <cuComplex.h>
#include <math.h>
#include <math_constants.h>
#include <iostream>
#include <time.h>
//////////////////////////////////////////////////////////////////////////
///////////////////////DECLARACIÓN DE FUNCIONES///////////////////////////
//////////////////////////////////////////////////////////////////////////
void vector_entrada_xn(int Li, int N);
void arreglo_W(int N);
void asign_rap(int N,int Li,int Lo);
void factor(int N);
void product(int vector_1[500],int vector_2[500],int valor);
void etapa_entrada(void);
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y);
void etapa_intermedia(void);
void etapa_salida(void);
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X);
//////////////////////////////////////////////////////////////////////////
/////////////////////DECLARACIÓN DE VARIABLES GLOBALES////////////////////
//////////////////////////////////////////////////////////////////////////
cuFloatComplex *x_host;
cuFloatComplex *W_host;
//cuFloatComplex *y_host;
//cuFloatComplex *z_host;
cuFloatComplex *X_host;
cuFloatComplex *x_device;
cuFloatComplex *W_device;
cuFloatComplex *y_device;
cuFloatComplex *z_device;
cuFloatComplex *X_device;
cufftComplex *in,*out;
FILE *db_open,*dc_open;
int Dip,Dop,P,N,Li,Lo;
int vF[500]; //Almacena los factores de N
int svF; //Almacena el numero de factores de N
int Prod[500];
int a;
#define inf 99999
//////////////////////////////////////////////////////////////////////////
//////////////////////////DATOS DE ENTRADA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// N >>> Número de elementos del vector de entrada
/// Li >>> Número de elementos de entrada diferentes de cero
/// Lo >>> Número de elementos de salida requeridos
/// loop >>> Número de iteraciones
/// muestras >>> Número de muestras
//////////////////////////////////////////////////////////////////////////
///////////////////////////DATOS DE SALIDA////////////////////////////////
//////////////////////////////////////////////////////////////////////////
/// X >>> Vector de salida
//////////////////////////////////////////////////////////////////////////
/////////////////// SE INGRESAN LOS DATOS DE ENTRADA /////////////////////
//////////////////////////////////////////////////////////////////////////
///Ingrese el número de iteraciones requeridas
const int loop = 300;
///Ingrese el valor de n_max, m_max y l_max (N = (2^n_max x 3^m_max x 5^l_max))
const int n_max = 5;
const int m_max = 4;
const int l_max = 4;
///Ingrese el valor de Li_max
const int Li_max = 45;
//////////////////////////////////////////////////////////////////////////
//////////////////////////FUNCION PRINCIPAL///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Función principal
int main()
{
//////////////////////////////////////////////////////////////////////////
//////////////////////////SELECCIÓN DEL DEVICE////////////////////////////
//////////////////////////////////////////////////////////////////////////
int device;
FILE *da;
cudaSetDevice(1);
cudaGetDevice(&device);
if(device == 0)
{
printf("\n\n---DEVICE = GeForce GTX 970---\n\n");
da = fopen("Tiempos_NCompuesta_Li45_LoVARIA_CUDA_GTX970.bin","a+b"); //Crea o sobre escribe archivo
}
if(device == 1)
{
printf("\n\n---DEVICE = TESLA K20---\n\n");
da = fopen("Tiempos_NCompuesta_Li45_LoVARIA_CUDA_TESLAK20c.bin","a+b"); //Crea o sobre escribe archivo
}
//////////////////////////////////////////////////////////////////////////
int i,j,i_N,j_res,k_res,cont,i_prom;
float suma;
float promedio[13];
int cont_1,n_1,m_1,l_1,m_ant,l_ant;
cont_1 = 0;
m_ant = 0;
l_ant = 0;
//Pausa
printf("\n---PRESIONA UNA TECLA PARA CONTINUAR---\n\n");
getchar();
for(i_N = 1;i_N <= 1;i_N++)
{
N = (int )((pow(2,n_max))*(pow(3,m_max))*(pow(5,l_max)));
printf("\n N = %d \n",N);
for(j_res=Li_max;j_res <= Li_max;j_res++)
{
Li=j_res;
for(n_1 = 1;n_1 <= n_max;n_1++)
{
for(m_1 = m_ant; m_1 <= n_1;m_1++)
{
m_ant = m_1;
for(l_1 = l_ant;l_1 <= m_1;l_1++)
{
l_ant = l_1;
if((m_1 <= m_max) && (l_1 <= l_max))
{
Lo = (int )((pow(2,n_1))*(pow(3,m_1))*(pow(5,l_1)));
cont_1++;
printf("\n Li = %d Lo = %d",Li,Lo);
///Se abre el archivo binario
db_open = fopen("Entrada_real_NCompuesta_C.bin","rb");
dc_open = fopen("Entrada_imag_NCompuesta_C.bin","rb");
suma=0.0;
for(j=0;j<loop;j++)
{
//Comandos necesarios para medir el tiempo
float elapsedTime_app;
cudaEvent_t start_app, stop_app;
cudaEventCreate(&start_app);
cudaEventCreate(&stop_app);
//Se generan en el host los valores del vector de entrada x[n]
vector_entrada_xn(Li,N);
///Se genera el arreglo W[N]
arreglo_W(N);
//---------------------------------------------------------------------------------------------
//Se empieza a medir el tiempo de ejecucion de la aplicacion
cudaEventRecord(start_app,0);
//Se generan en el host los factores Dip y Dop
asign_rap(N,Li,Lo);
//Cálculo en el host del factor P
P = N/(Dip*Dop);
//printf("\n\n FACTOR P:\n\n");
//printf("\n Dip = %d Dop = %d P = %d ",Dip,Dop,P);
//Función auxiliar del host para ejecutar la etapa de entrada
etapa_entrada();
//Función auxiliar del host para ejecutar la etapa intermedia
etapa_intermedia();
//Función auxiliar del host para ejecutar la etapa de salida
etapa_salida();
//---------------------------------------------------------------------------------------------
//Comandos necesarios para medir el tiempo de la aplicacion (app)
cudaEventRecord(stop_app,0);
cudaEventSynchronize(stop_app);
cudaEventElapsedTime(&elapsedTime_app,start_app,stop_app);
//Suma de todos los tiempos
suma = suma + elapsedTime_app;
//Se destruyen los eventos que miden el tiempo de la aplicacion
cudaEventDestroy(start_app);
cudaEventDestroy(stop_app);
//Se liberan memorias del Host y Device
free(x_host);
free(W_host);
free(X_host);
cudaFree(x_device);
cudaFree(W_device);
cudaFree(y_device);
cudaFree(z_device);
cudaFree(X_device);
}
promedio[cont_1-1] = suma/(float)loop;
fclose(db_open);
fclose(dc_open);
}
}
}
}
}
}
fwrite(promedio,sizeof(float),13,da);
printf("\n\nTIEMPOS:\n\n");
int time_print;
for(time_print = 0;time_print < cont_1;time_print++)
{
printf("\nTime (%d)= %f ms",time_print,promedio[time_print]);
}
fclose(da);
}
//////////////////////////////////////////////////////////////////////////
/////////////////////////FUNCIONES SECUNDARIAS////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Ésta función genera el vector de entrada x[n]
void vector_entrada_xn(int Li, int N)
{
//Declaración de variables locales
int k;
float *buffer_real,*buffer_imag;
//Se reserva memoria para xn_host en el host
x_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Li);
buffer_real = (float*)malloc(sizeof(float)*N);
buffer_imag = (float*)malloc(sizeof(float)*N);
///Se lee el vector de entrada del archivo binario
fread(buffer_real,sizeof(float),N,db_open);
fread(buffer_imag,sizeof(float),N,dc_open);
//Se dan valores a x[n]
for(k = 0;k < Li; k++)
{
//x_host[k] = make_cuFloatComplex((float)(rand()%11),(float)(rand()%11));
//x_host[k] = make_cuFloatComplex((float)(k + 1),(float)(0.0));
x_host[k] = make_cuFloatComplex(buffer_real[k],buffer_imag[k]);
}
/*
//Se imprimen los valores de entrada x[n]
printf("\n---ELEMENTOS DE ENTRADA x[n]---\n\n");
for(k=0;k<Li;k++)
{
printf(" %d-> (%f) + (%f)\n",k+1,cuCrealf(x_host[k]),cuCimagf(x_host[k]));
}
*/
free(buffer_real);
free(buffer_imag);
}
//Ésta función genera el arreglo W
void arreglo_W(int N)
{
//Declaración de variables locales
int n;
//Se reserva memoria para W_host en el host
W_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*N);
//Se genera el arreglo W
for(n = 1;n <= N;n++)
{
W_host[n-1] = make_cuFloatComplex((float)cos((2*CUDART_PI*n)/N),(float)(-1)*sin((2*CUDART_PI*n)/N));
}
/*
//Se imprimen los valores del arreglo W[N]
printf("\n---ARREGLO W[N]---\n\n");
for(n = 0;n < N; n++)
{
printf(" W[%d]-> (%f) + (%f)\n",n+1,cuCrealf(W_host[n]),cuCimagf(W_host[n]));
}
*/
}
//Ésta función genera los factores Dip y Dop
void asign_rap(int N,int Li,int Lo)
{
//Declaración de variables locales
float NLi,NLo,Diprapt,Doprapt;
int Nh[500];
int k[500];
int G;
int g,i,t,ta;
int Dipt[500],Dopt[500];
float distrapt,distrap;
int Pos,h,Poss;
int nk[500];
int r;
//Inicializaciones
G = 0;
svF = 0;
//Factores Dip y Dop ideales
NLi=(float)N/(float)Li;
NLo=(float)N/(float)Lo;
Diprapt=NLi;
Doprapt=NLo;
//Se encuentran los factores de "N"
//vF almacena los factores de "N"
//svF almacena el número de factores de "N"
factor(N);
//printf("\n ERROR \n");
/*
Almacena en el vector Nh los factores que son diferentes de del vector vF
En el vector k se almacena la cantidad de veces que se repite cada
elemento almacenado en el vector Nh.
*/
Nh[0] = vF[0];
k[0]=1;
for(g=1;g<=svF-1;g=g+1)
{
if(vF[g]!=vF[g-1])
{
G=G+1;
Nh[G]=vF[g];
k[G]=1;
}
else
{
k[G]=k[G]+1;
}
}
/*
Almacena en el vector Nh todas las posibles combinaciones que den como
producto a N. t almacena el numero de elementos del vector Nh.
*/
product(Nh,k,G);
t = a;
for(i=0;i<t;i=i+1)
{
Dipt[i]=Prod[i];
}
distrapt=inf;
for(g=1;g<=t;g=g+1)
{
if(Dipt[g-1]<=NLi)
{
Pos=g-1;
for(h=0;h<=G;h=h+1)
{
Poss=floor(Pos/(k[h]+1));
nk[h]=k[h]+Poss*(k[h]+1)-Pos;
Pos=Poss;
}
product(Nh,nk,G);
ta=a;
for(i=0;i<ta;i=i+1)
{
Dopt[i]=Prod[i];
}
////////////////////////////////////////////
//int j;
//for(j=0;j<ta;j++)
//{
// printf(" %d ",Dopt[j]);
//}
//printf("\n\n ta=%d\n\n",ta);
///////////////////////////////////////////
for(r=0;r<ta;r=r+1)
{
distrap=sqrt(pow(Diprapt-(Dipt[g-1]),2)+pow(Doprapt-(Dopt[r]),2));
if(distrap<distrapt)
{
distrapt=distrap;
Dip=Dipt[g-1];
Dop=Dopt[r];
}
}
}
}
/*
printf("\n\n FACTOR Dip :\n\n");
printf(" %d ",Dip);
printf("\n\n FACTOR Dop:\n\n");
printf(" %d ",Dop);
*/
}
//Ésta función encuentra los factores de "N"
void factor(int N)
{
//Se empieza a verificar los factores desde 2
int i=2;
long N_factor;
N_factor = N;
while(i<=N_factor)
{
while((N_factor%i)==0)
{
vF[svF]=i;
N_factor=N_factor/i;
// printf("Factores: %d ",vF[svF]);
svF++;
}
i++;
}
}
//Ésta función encuentra todas las posibles combinaciones de factores que den como resultado "N"
void product(int vector_1[500],int vector_2[500],int valor)
{
int d,e,s,pNh,i;
int cont=0;
Prod[0]=1;
a=1;
for(d=0;d<=valor;d=d+1)
{
s=a;
pNh=1;
for(e=1;e<=vector_2[d];e=e+1)
{
pNh=pNh*vector_1[d];
for(i=(s*e+1);i<=(s*e+s);i=i+1)
{
Prod[i-1]=pNh*Prod[cont];
cont=cont+1;
}
a=a+s;
cont=0;
}
}
}
//Función auxiliar del host para calcular la etapa de entrada en el device
void etapa_entrada(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE ENTRADA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,n1,n2;
//Asignación de memoria en el device para el arreglo "x_device"
cudaMalloc((void**)&x_device,Li*sizeof(cuFloatComplex));
//Se reserva memoria en el device para el arreglo "W_device"
cudaMalloc((void**)&W_device,N*sizeof(cuFloatComplex));
//Asignación de memoria en el device para el arreglo "y"
cudaMalloc((void**)&y_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Se pasa el arreglo x_host a x_device
cudaMemcpy(x_device,x_host,Li*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Envío de los arreglos W hacia la memoria global del device
cudaMemcpy(W_device,W_host,N*sizeof(cuFloatComplex),cudaMemcpyHostToDevice);
//Asignación de memoria en el host para "y"
//y_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Dimensionamiento del grid para la función kernel "inputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((P*Dop) < 32 && (Dip) < 32)
{
blockDim.x = (P*Dop);
blockDim.y = (Dip);
gridDim.x = 1;
gridDim.y = 1;
}
else
{
blockDim.x = 32;
blockDim.y = 32;
gridDim.x = (unsigned int) (ceilf((float)(P*Dop)/(float)blockDim.x));
gridDim.y = (unsigned int) (ceilf((float)Dip/(float)blockDim.y));
}
//Lanzamiento del kernel "inputStage_kernel"
inputStage_kernel<<<gridDim,blockDim>>>(N,Li,Dip,Dop,P,x_device,W_device,y_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
/*
//Copia del arreglo "y" del device hacia el host
cudaMemcpy(y_host,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
//Se imprimen los valores de "y"
printf("\n\n--- ARREGLO y(n1,n2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(n2 = 0;n2 < P;n2++)
{
printf(" (%f) + (%f) ",cuCrealf(y_host[(k1*Dop*P)+(n1*P)+n2]),cuCimagf(y_host[(k1*Dop*P)+(n1*P)+n2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//función kernel que ejecuta la etapa de entrada en el device
__global__ void inputStage_kernel(int N, int Li,int Dip,int Dop,int P,cuFloatComplex *x,cuFloatComplex *W,cuFloatComplex *y)
{
int n1,n2;
cuFloatComplex t1;
//Threads
int n = blockDim.x *blockIdx.x + threadIdx.x;
int k1 = blockDim.y *blockIdx.y + threadIdx.y;
//Se resetean las flags
//flag_inputstage_1_d[0] = 0;
//flag_inputstage_2_d[0] = 0;
//flag_inputstage_3_d[0] = 0;
//printf("\n n = %d k1 = %d",n,k1);
if( (n < (P*Dop)) && (k1 < Dip))
{
n2 = floorf(n/Dop);
n1 = n - (Dop*n2);
//Generación de los elementos que dependen de x[0]
if(n == 0)
{
y[(k1*Dop*P)+(0*P)+ 0] = x[0];
///Flag
//flag_inputstage_1_d[0] = 1;
}
//Mapeo de x[n] a las entradas del primer conjunto de Dop DFT's
if((n >= 1) && (n <= (Li-1)))
{
t1 = x[n];
if(k1 == 0)
{
y[(0*Dop*P)+(n1*P)+ n2] = t1;
}
if(k1 >= 1)
{
y[(k1*Dop*P)+(n1*P)+ n2] = cuCmulf(W[((n*k1)%N)-1],t1);
}
///Flag
//flag_inputstage_2_d[0] = 1;
}
//Rellenado de ceros para los elementos de "y" para Li <= n <= (P*Dop)-1
if((n >= Li) && (n <= (P*Dop)-1))
{
y[(k1*Dop*P)+(n1*P)+ n2] = make_cuFloatComplex(0.0,0.0);
///Flag
//flag_inputstage_3_d[0] = 1;
}
//printf("\n (%f) + (%f)\n ",cuCrealf(y[(k1*Dop*P)+(n1*P)+ n2]),cuCimagf(y[(k1*Dop*P)+(n1*P)+ n2]));
}
}
//Función auxiliar del host para calcular la etapa intermedia en el device
void etapa_intermedia(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA INTERMEDIA//////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int k1,k2,n1;
int n[1] = {P};
int inembed[1] = {P};
int onembed[1] = {P};
//Asignación de memoria en el device para "z"
cudaMalloc((void**)&z_device,P*Dip*Dop*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "z"
//z_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*P*Dip*Dop);
//Asignación de memoria en el device para "in" y "out"
cudaMalloc((void**)&in,sizeof(cufftComplex)*P*Dip*Dop);
cudaMalloc((void**)&out,sizeof(cufftComplex)*P*Dip*Dop);
//Se copia el arreglo "y" al arreglo "in"
cudaMemcpy(in,y_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se crea un plan
cufftHandle plan;
cufftPlanMany(&plan,1,n,inembed,1,P,onembed,1,P,CUFFT_C2C,Dip*Dop);
//Ejecución del plan
cufftExecC2C(plan,in,out,CUFFT_FORWARD);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Se copian los datos del arreglo "out" al arreglo "z_device"
cudaMemcpy(z_device,out,sizeof(cufftComplex)*P*Dip*Dop,cudaMemcpyDeviceToDevice);
//Se destruye el plan
cufftDestroy(plan);
//Se liberan los arreglos "in" y "out"
cudaFree(in);
cudaFree(out);
/*
//Se copian los datos del arreglo "z_device" al arreglo "z_host"
cudaMemcpy(z_host,z_device,sizeof(cuFloatComplex)*P*Dip*Dop,cudaMemcpyDeviceToHost);
///Se imprimen los valores de z(n1,k2,k1)
printf("\n\n--- ARREGLO z(n1,k2,k1) ---\n\n");
for(k1 = 0;k1 < Dip;k1++)
{
for(n1 = 0;n1 < Dop;n1++)
{
for(k2 = 0;k2 < P;k2++)
{
printf(" (%f) + (%f) ",cuCrealf(z_host[(k1*Dop*P)+(n1*P)+k2]),cuCimagf(z_host[(k1*Dop*P)+(n1*P)+k2]));
}
printf("\n");
}
printf("\n\n");
}
printf("\n");
*/
}
//Función auxiliar del host para calcular la etapa de salida en el device
void etapa_salida(void)
{
//////////////////////////////////////////////////////////////////////////
////////////////////////////ETAPA DE SALIDA///////////////////////////////
//////////////////////////////////////////////////////////////////////////
//Declaración de variables locales
int m;
//Asignación de memoria en el device para "X"
cudaMalloc((void**)&X_device,Lo*sizeof(cuFloatComplex));
//Asignación de memoria en el host para "X"
X_host = (cuFloatComplex*)malloc(sizeof(cuFloatComplex)*Lo);
//Dimensionamiento del grid para la función kernel "outputStage"
//Dimensionamiento del Grid
dim3 gridDim(1,1,1);
//Dimensionamiento del block
dim3 blockDim(1,1,1);
if((Lo) < 1024)
{
blockDim.x = Lo;
gridDim.x = 1;
}
else
{
blockDim.x = 1024;
gridDim.x = (unsigned int) (ceilf((float)Lo/(float)blockDim.x));
}
//Lanzamiento del kernel "outputStage_kernel"
outputStage_kernel<<<gridDim,blockDim>>>(N,Lo,Dip,Dop,P,z_device,W_device,X_device);
//Esperar que el kernel termine de ejecutarse totalmente
cudaDeviceSynchronize();
//Copia del arreglo "X" del device hacia el host
cudaMemcpy(X_host,X_device,sizeof(cuFloatComplex)*Lo,cudaMemcpyDeviceToHost);
/*
//Se imprimen los valores de "X_host"
///Imprimir X[k]
printf("\n\n--- ARREGLO X[k] ---\n\n");
for(m=0;m<=Lo-1;m++)
{
printf("\n X[%d] = %.4f + (%.4f)",m,cuCrealf(X_host[m]),cuCimagf(X_host[m]));
//fprintf(da,"%.4f %.4f\n",creal(X[i]),cimag(X[i]));
}
*/
}
//función kernel que ejecuta la etapa de salida en el device
__global__ void outputStage_kernel(int N,int Lo,int Dip,int Dop,int P,cuFloatComplex *z,cuFloatComplex *W,cuFloatComplex *X)
{
//Declaración de variables locales
int n1,k_aux,k1,k2,a,b;
cuFloatComplex t1,t2,t3,t4,t5;
//Threads
int k = blockDim.x *blockIdx.x + threadIdx.x;
//Se resetean las flags
//flag_outputstage_1_d[0] = 0;
//flag_outputstage_2_d[0] = 0;
//flag_outputstage_3_d[0] = 0;
if(k < Lo)
{
for(n1 = 0; n1 <= (Dop-1); n1 = n1+1)
{
if(Lo <= Dip)
{
//Cálculo de X(k) para 0<=k<=Lo-1.
//printf("\n--- Caso (Lo <= Dip) ---\n");
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
///Flag
//flag_outputstage_1_d[0] = 1;
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
///Flag
//flag_outputstage_1_d[0] = 1;
}
}
else
{
if((k >= 0) && (k <= (Dip-1)))
{
//Cálculo de X(k) para 0<=k<=Dip-1.
//En la descomposición k = k1 + Dipk2; k2 = 0, y por lo tanto, k = k1
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
else
{
if(n1 == 1)
{
X[k] = z[(k*Dop*P)+(0*P) + 0];
}
X[k] = cuCaddf(z[(k*Dop*P)+(n1*P) + 0],X[k]);
}
}
else
{
if(Dop <= 4)
{
//Usando el método directo
//printf("\n--- Caso (Metodo directo) ---\n");
if(n1 == 0) //Caso para lograr que por lo menos ingrese una vez
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
///Flag
//flag_outputstage_2_d[0] = 1;
}
else
{
if(n1 == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
X[k] = z[(k1*Dop*P)+(0*P)+ (k2%P)];
}
a = floorf(k/(Dip*P));
X[k] = cuCaddf(X[k],cuCmulf(z[(k1*Dop*P)+(n1*P)+ (k2%P)],W[((n1*(k2+P*(a))*Dip)%N)-1]));
///Flag
//flag_outputstage_2_d[0] = 1;
}
}
else
{
//Usando el método filtering 2BF
//printf("\n--- Caso (Filtro 2BF) ---\n");
if((Dop-2) >= 1)
{
if(n1 == 0)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
///Flag
//flag_outputstage_3_d[0] = 1;
}
if((n1 >= 1) && (n1 <= (Dop-2)))
{
t2 = t1;
t1 = cuCaddf(z[(k1*Dop*P)+((-(n1-(Dop-1)))*P)+ (k2%P)],t4);
t3 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t4 = cuCsubf(t3,t2);
}
if(n1 == (Dop-1))
{
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
}
}
else
{
if(Dop == 1)
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
X[k] = t1;
///Flag
//flag_outputstage_3_d[0] = 1;
}
else
{
k_aux = k-((Dip*P)*floorf(k/(Dip*P)));
k2 = floorf(k_aux/Dip);
k1 = k_aux-(Dip*k2);
t1 = z[(k1*Dop*P)+((Dop-1)*P)+ (k2%P)];
b = floorf(k/(Dip*P));
t4 = cuCmulf(t1,make_cuFloatComplex(2*cuCrealf(W[(((k2+P*(b))*Dip)%N)-1]),0.0));
t5 = cuCaddf(z[(k1*Dop*P)+(0*P)+ (k2%P)],t4);
X[k] = cuCsubf(t5,cuCmulf(t1,cuConjf(W[(((k2+P*(b))*Dip)%N)-1])));
///Flag
//flag_outputstage_3_d[0] = 1;
}
}
}
}
}
}
}
} |
98f27e97a5a420686b98344d07553d8d91c0df43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// Ray.cpp
// RayTracer
//
// Created by Bo Miller on 1/2/19.
// Copyright 2019 Bo Miller. All rights reserved.
//LDFLAGS= -L./glm/glm -glm
#include "glm/glm/glm.hpp"
#include "glm/glm/gtx/io.hpp"
#include <iostream>
#include "Ray.hpp"
#include <vector>
#include <atomic>
#include <mutex>
#include <math.h>
#define PI 3.14159265359
#include <pthread.h>
#include <chrono>
#include "bvh.hpp"
#include <random>
#include <queue>
#include "isect.hpp"
float RAY_EPSILON = 0.000000001;
int antialiasing = 0;
int numBounces = 2;
int SampPerPix = 4;
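// Generates one primary ray per pixel using a grid-stride loop; the image is assumed to be
// 720 pixels wide (hard-coded in the modulus below), L is a corner of the image plane and
// u, v are the per-pixel steps along its two axes.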
__global__ void GeneratePrimaryRays(Ray* rays, int n, glm::vec3 L, glm::vec3 u, glm::vec3 v, glm::vec3 cameraPosition)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
int ipix = i%720;
int jpix = i/720;
glm::vec3 pix = (L+u*float(ipix)+v*float(jpix));
float xoffset = 0;
float yoffset = 0;
glm::vec3 sample = glm::normalize(glm::vec3(pix[0]+xoffset,pix[1]+yoffset,pix[2])-cameraPosition);
rays[i].raytype = 0;
rays[i].position = cameraPosition;
rays[i].direction = sample;
rays[i].i = ipix;
rays[i].j = jpix;
rays[i].color = glm::vec3(0,0,0);
rays[i].surfaceReflectiveCoef = glm::vec3(0,0,0);
}
}
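// Ray/AABB test using the slab method: intersect the ray with the three pairs of axis-aligned
// planes bounding the node and check that the resulting t-intervals overlap.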
__device__ bool boundingBoxIntersection(glm::vec3& position, glm::vec3& direction, Node* node)
{
float tmin = (node->minX-position[0])/direction[0];
float tmax = (node->maxX-position[0])/direction[0];
if(tmin>tmax)
{
float temp = tmin;
tmin = tmax;
tmax = temp;
}
float tymin = (node->minY-position[1])/direction[1];
float tymax = (node->maxY-position[1])/direction[1];
if(tymin>tymax)
{
float temp = tymin;
tymin = tymax;
tymax = temp;
}
if((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = (node->minZ-position[2])/direction[2];
float tzmax = (node->maxZ-position[2])/direction[2];
if (tzmin > tzmax)
{
float temp = tzmin;
tzmin = tzmax;
tzmax = temp;
}
if ((tmin > tzmax) || (tzmin > tmax))
return false;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return true;
}
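// Ray/sphere test: solves the quadratic from |o + t*d - center|^2 = r^2 and reports a hit when
// the far root is positive, returning the nearest positive root as the hit time along with the
// hit point and outward unit normal.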
__device__ bool intersectSphere(SceneObject& s, glm::vec3& position, glm::vec3& direction, float& isectT, glm::vec3& normal, glm::vec3& intersection)
{
float RAY_EPSILON = 0.000000001;
float a = glm::dot(direction, direction);
float b = 2 * glm::dot(direction,position-s.position);
float c = glm::dot(s.position,s.position) + glm::dot(position,position) + (-2 * glm::dot(s.position,position)) - pow(s.radius,2);
float discriminant = b*b - 4*a*c;
if(discriminant > 0.0+RAY_EPSILON)
{
float t = (-b - sqrt(discriminant))/(2*a);
float t2 = (-b + sqrt(discriminant))/(2*a);
if(t2>RAY_EPSILON)
{
//we know we have some intersection
if( t > RAY_EPSILON )
{
isectT = t;
}
else
{
isectT = t2;
}
intersection = position+t*direction;
normal = glm::normalize((intersection-s.position)/s.radius);
return true;
}
}
return false;
}
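// Ray/triangle test: intersects the ray with the triangle's plane, rejects points outside the
// three edges, then uses barycentric coordinates to interpolate the per-vertex normals.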
__device__ bool intersectTriangle(SceneObject& s, glm::vec3& position, glm::vec3& direction, glm::vec3& n, glm::vec3& intersection, float& time)
{
glm::vec3 normal = glm::normalize(glm::cross((s.v2-s.v1),(s.v3-s.v1)));
float denom = glm::dot(normal,direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((s.v1-position),normal)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = position+t*direction;
float test1 = glm::dot(glm::cross((s.v2-s.v1),(intersect-s.v1)),normal);
float test2 = glm::dot(glm::cross((s.v3-s.v2),(intersect-s.v2)),normal);
float test3 = glm::dot(glm::cross((s.v1-s.v3),(intersect-s.v3)),normal);
if(test1 >= 0.0 && test2 >= 0.0 && test3 >= 0.0)
{
glm::vec3 v0 = s.v2 - s.v1;
glm::vec3 v1 = s.v3 - s.v1;
glm::vec3 v2 = intersect - s.v1;
float d00 = glm::dot(v0, v0);
float d01 = glm::dot(v0, v1);
float d11 = glm::dot(v1, v1);
float d20 = glm::dot(v2, v0);
float d21 = glm::dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float v = (d11 * d20 - d01 * d21) / denom;
float w = (d00 * d21 - d01 * d20) / denom;
float u = 1.0f - v - w;
n = glm::normalize(u*s.v1Norm+v*s.v2Norm+w*s.v3Norm);
//n = -normal;
intersection = intersect;
time = t;
return true;
}
}
}
return false;
}
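// Recursive BVH traversal: interior nodes descend into the children whose bounding boxes the
// ray hits; leaf nodes test each stored sphere or triangle and keep the closest hit found so far.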
__device__ void bvhTraverse(glm::vec3& position, glm::vec3& direction, Node* currentNode,bool& intersect,float& minT, SceneObject& intersectObj, glm::vec3& minTnormal, glm::vec3& minTintersection)
{
if(currentNode->isleaf)
{
if(boundingBoxIntersection(position, direction, currentNode))
{
for(int i = 0; i<currentNode->numObjs;i++)
{
if(currentNode->objs[i]->sphere)
{
float iTime;
glm::vec3 normal;
glm::vec3 intersection;
if(intersectSphere(*currentNode->objs[i], position, direction, iTime, normal, intersection))
{
if(iTime<minT)
{
minTnormal = normal;
minTintersection = intersection;
intersectObj = *currentNode->objs[i];
minT = iTime;
intersect = true;
}
}
}
else if(currentNode->objs[i]->triangle)
{
float intersectT;
glm::vec3 normal;
glm::vec3 intersection;
if(intersectTriangle(*currentNode->objs[i], position, direction, normal, intersection, intersectT))
{
if(intersectT<minT)
{
minTnormal = normal;
minTintersection = intersection;
intersectObj = *currentNode->objs[i];
minT = intersectT;
intersect = true;
}
}
}
}
}
}
else
{
if(boundingBoxIntersection(position, direction, currentNode->left))
bvhTraverse(position, direction,currentNode->left,intersect,minT,intersectObj,minTnormal,minTintersection);
if(boundingBoxIntersection(position, direction, currentNode->right))
bvhTraverse(position, direction,currentNode->right,intersect,minT,intersectObj,minTnormal,minTintersection);
}
}
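// Intersects the ray against the six axis-aligned walls of the room (x = +/-200, y = +/-200,
// z = +/-300) and fills the hit record with that wall's material; the walls are non-reflective,
// so any reflected ray spawned here carries a zero reflective coefficient.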
__device__ bool wallIntersection(Isect& ipoint, Ray& r, Ray& reflect, Node* root)
{
float x = 200.0f;
float y = 200.0f;
float z = 300.0f;
glm::vec3 up = glm::vec3(0,1,0);
float denom = glm::dot(up,r.direction);
if(fabsf(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,-y,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]<z && intersect[0]>-x && intersect[0] < x)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(1.0, 0.2, 0.2);;
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef* glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(1.0, 0.2, 0.2);
ipoint.ambient = glm::vec3(1.0, 0.2, 0.2);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//left wall
up = glm::vec3(1,0,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(-x,0,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]< z && intersect[1] < y && intersect[1] > -y)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.2, 0.2, 1.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.2, 0.2, 1.0);
ipoint.ambient = glm::vec3(0.2, 0.2, 1.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//front wall, green wall in front of camera
up = glm::vec3(0,0,1);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,0,-z)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[0] > -x && intersect[0] < x && intersect[1] < y && intersect[1] > -y )
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.0, 1.0, 0.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.0, 1.0, 0.0);
ipoint.ambient = glm::vec3(0.0, 1.0, 0.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//back wall, yellow wall behind camera
up = glm::vec3(0,0,-1);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,0,z)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[0] > -x && intersect[0] < x && intersect[1] < y && intersect[1] > -y )
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(1.0, 1.0, 0.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(1.0, 1.0, 0.0);
ipoint.ambient = glm::vec3(1.0, 1.0, 0.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//right wall
up = glm::vec3(-1,0,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(x,0,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]<z && intersect[1] < y && intersect[1] > -y)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.2, 0.2, 1.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.2, 0.2, 1.0);
ipoint.ambient = glm::vec3(0.2, 0.2, 1.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//ceiling
up = glm::vec3(0,-1,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,y,0)-r.position), up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2] > -z && intersect[2] < z && intersect[0] > -x && intersect[0] < x)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(1, 1, 1);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(.9, .9, .9);
ipoint.ambient = glm::vec3(1, 1,1);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
r.color = glm::vec3(0,0,0);
return false;
}
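// One thread per ray: traverse the BVH for the closest hit, falling back to the room walls when
// nothing in the BVH is intersected, then write out the intersection record for shading and the
// reflected ray to be traced on the next bounce.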
__global__ void RayIntersection(Ray* rays, int n, Ray* reflectedRays, Node* bvhhead, Isect* isectPoints, int* nw, int* ne, int* sw, int* se)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
glm::vec3 direction = rays[i].direction;
glm::vec3 position = rays[i].position;
reflectedRays[i].i = rays[i].i;
reflectedRays[i].j = rays[i].j;
reflectedRays[i].color = rays[i].color;
reflectedRays[i].raytype = 1;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool intersect = false;
bvhTraverse(position,direction,bvhhead,intersect,minT,intersectObj,minTnormal,minTintersection);
if(intersect)
{
if(rays[i].raytype == 0)
{
isectPoints[i].color = glm::vec3(0,0,0);
reflectedRays[i].surfaceReflectiveCoef = intersectObj.reflective;
isectPoints[i].reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
isectPoints[i].color = 0.2f * intersectObj.ambient;
}
else if(rays[i].raytype == 1){
isectPoints[i].reflectionCoef = rays[i].surfaceReflectiveCoef;
reflectedRays[i].surfaceReflectiveCoef = rays[i].surfaceReflectiveCoef*intersectObj.reflective;
}
minTintersection = minTintersection+minTnormal*0.7f;
reflectedRays[i].position = minTintersection;
reflectedRays[i].direction = glm::normalize(glm::reflect(rays[i].direction, minTnormal));
isectPoints[i].isectPoint = minTintersection;
isectPoints[i].incidentDirection = rays[i].direction;
isectPoints[i].normal = minTnormal;
isectPoints[i].i = rays[i].i;
isectPoints[i].j = rays[i].j;
isectPoints[i].diffuse = intersectObj.diffuse;
isectPoints[i].ambient = intersectObj.ambient;
isectPoints[i].shininess = intersectObj.shininess;
isectPoints[i].specular = intersectObj.specular;
isectPoints[i].reflective = intersectObj.reflective;
}
else
{
Isect point;
point.color = isectPoints[i].color;
wallIntersection(point,rays[i],reflectedRays[i],bvhhead);
isectPoints[i] = point;
reflectedRays[i].position = point.isectPoint;
reflectedRays[i].direction = glm::normalize(glm::reflect(rays[i].direction, point.normal));
isectPoints[i].i = rays[i].i;
isectPoints[i].j = rays[i].j;
}
/*if(reflectedRays[i].direction[0] <= 0.0f && reflectedRays[i].direction[1] >= 0.0f)
{
atomicAdd(nw,1);
}
else if(reflectedRays[i].direction[0] >= 0.0f && reflectedRays[i].direction[1] >= 0.0f)
{
atomicAdd(ne,1);
}
else if(reflectedRays[i].direction[0] <= 0.0f && reflectedRays[i].direction[1] <= 0.0f)
{
atomicAdd(sw,1);
}
else if(reflectedRays[i].direction[0] >= 0.0f && reflectedRays[i].direction[1] <= 0.0f)
{
atomicAdd(se,1);
}*/
}
}
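// Phong-style shading of each intersection: for every light a shadow ray is traced through the
// BVH, point lights are attenuated with distance, and the diffuse plus specular contribution is
// accumulated into the hit's color, scaled by the accumulated reflection coefficient.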
__global__ void Shade(Isect* isectPoints, int n, Light* lights, int numlights, Node* bvhhead)
{
float RAY_EPSILON = 0.000000001;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
glm::vec3 color;
glm::vec3 direction = glm::normalize(isectPoints[i].incidentDirection);
glm::vec3 intersection = isectPoints[i].isectPoint;
glm::vec3 normal = isectPoints[i].normal;
float distance;
glm::vec3 toLight;
glm::vec3 reflectFromLight;
for(int j =0; j<numlights; j++)
{
Light l = lights[j];
/*if(l.area)
{
glm::vec3 avgcolor;
int lightSamples = 30;
for(int i = 0 ;i<lightSamples;i++)
{
float radius = hiprand_uniform(threadIdx.x*blockIdx.x)*l.radius;
float theta = hiprand_uniform(threadIdx.x*blockIdx.x)*(2*PI);
float x = radius * cos(theta);
float z = radius * sin(theta);
l.position[0] += x;
l.position[2] += z;
toLight = glm::normalize(l.position-intersection);
reflectFromLight = -toLight;
distance = 1.0f;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
intersection = intersection + .01f * normal;
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
if(minT>RAY_EPSILON)
{
glm::vec3 ipoint = intersection+minT*toLight;
float dtoLight = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
float dtoLightIntersection = sqrt(pow(ipoint[0]-intersection[0],2)+pow(ipoint[1]-intersection[1],2)+pow(ipoint[2]-intersection[2],2));
if(dtoLight>dtoLightIntersection)
distance = distance * 0;
}
}
avgcolor += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
color += avgcolor/(float)lightSamples;
}*/
if(l.point)
{
float d = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
distance = 1.0f/(l.constantTerm + l.linearTerm * d + l.quadraticTerm * pow(d,2));
if(distance>1.5)
distance = .5;
toLight = glm::normalize(l.position-intersection);
reflectFromLight = -toLight;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
intersection = intersection + .01f * normal;
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
glm::vec3 ipoint = intersection+minT*toLight;
float dtoLight = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
float dtoLightIntersection = sqrt(pow(ipoint[0]-intersection[0],2)+pow(ipoint[1]-intersection[1],2)+pow(ipoint[2]-intersection[2],2));
if(dtoLight>dtoLightIntersection)
distance = distance * 0;
}
color += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
else
{
distance = 1.0f;
toLight = -glm::normalize(l.direction);
reflectFromLight = glm::normalize(l.direction);
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
                //check if the light and the surface normal are facing different directions
float dotP = glm::dot(reflectFromLight,normal);
if(dotP > -0.00001 )
shadow = true;
else
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
if(minT>RAY_EPSILON)
distance = distance * 0;
}
color += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
}
isectPoints[i].color += isectPoints[i].reflectionCoef * color;
}
}
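//host entry point: allocates managed buffers, generates one primary ray per
//pixel, alternates RayIntersection and Shade for numBounces bounces, then
//clamps the accumulated colors to [0,255] and writes them into
//pixelcolorBuffer with the channels reversed (BGR)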
void startRayTracing(float width, float height, unsigned char*& pixelcolorBuffer,glm::vec3 cameraPosition, glm::vec3 cameraDirection, std::vector<SceneObject*>& scene, std::vector<Light>& lights, Node* rootnode)
{
//set the stack size for threads
size_t limit = 4096;
hipDeviceSetLimit(hipLimitStackSize,limit);
Light* scenelights;
int numlights = (int)lights.size();
hipMallocManaged(&scenelights,numlights*sizeof(Light));
for(int i = 0; i<numlights; i++)
{
scenelights[i] = lights[i];
}
int totalRaysInSystem = width*height;
    //for primary ray calculations
glm::vec3 n = glm::normalize(cameraPosition-cameraDirection);
glm::vec3 u = glm::normalize(glm::cross(glm::vec3(0,1,0),n));
glm::vec3 v = glm::cross(n,u);
float fov = 45/(180.0 / PI);
float d = (height/tan(fov/2))/2;
glm::vec3 L = (cameraPosition-n*d) - u * (width/2) - v*(height/2);
//generate primary rays
Ray *cudarays;
hipMallocManaged(&cudarays,totalRaysInSystem*sizeof(Ray));
int blockSize = 256;
int numBlocks = (totalRaysInSystem + blockSize -1)/blockSize;
hipLaunchKernelGGL(( GeneratePrimaryRays), dim3(numBlocks),dim3(blockSize), 0, 0, cudarays,totalRaysInSystem, L, u, v, cameraPosition);
hipDeviceSynchronize();
Ray *reflectedRays;
hipMallocManaged(&reflectedRays,totalRaysInSystem*sizeof(Ray));
    Isect *cpuisectPoints = (Isect *)malloc(totalRaysInSystem*sizeof(Isect));
Isect *isectPoints;
hipMallocManaged(&isectPoints,totalRaysInSystem*sizeof(Isect));
/*USED FOR RAY SORTING NOT IMPLEMENTED YET*/
int *nw,*ne,*sw,*se;
hipMallocManaged(&nw,sizeof(int));
hipMallocManaged(&ne,sizeof(int));
hipMallocManaged(&sw,sizeof(int));
hipMallocManaged(&se,sizeof(int));
//******************************************
for(int i =0; i < numBounces; i++)
{
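        //one bounce: trace the current rays, shade the resulting hits, copy the
        //shaded intersections back to the host, then promote the reflected rays
        //to be the input rays of the next bounce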
hipLaunchKernelGGL(( RayIntersection), dim3(numBlocks),dim3(blockSize), 0, 0, cudarays,totalRaysInSystem,reflectedRays,rootnode,isectPoints,nw,ne,sw,se);
hipDeviceSynchronize();
hipLaunchKernelGGL(( Shade), dim3(numBlocks), dim3(blockSize), 0, 0, isectPoints,totalRaysInSystem,scenelights,numlights,rootnode);
hipDeviceSynchronize();
hipMemcpy(cpuisectPoints, isectPoints, totalRaysInSystem*sizeof(Isect),hipMemcpyDeviceToHost);
hipMemcpy(cudarays, reflectedRays, totalRaysInSystem*sizeof(Ray),hipMemcpyDeviceToDevice);
}
for(int i = 0; i<totalRaysInSystem;i++)
{
cpuisectPoints[i].color = cpuisectPoints[i].color*255.0f;
cpuisectPoints[i].color = glm::clamp(cpuisectPoints[i].color,glm::vec3(0.0f,0.0f,0.0f),glm::vec3(255.0f,255.0f,255.0f));
pixelcolorBuffer[i*3] = cpuisectPoints[i].color[2];
pixelcolorBuffer[i*3+1] = cpuisectPoints[i].color[1];
pixelcolorBuffer[i*3+2] = cpuisectPoints[i].color[0];
}
hipFree(cudarays);
hipFree(reflectedRays);
hipFree(isectPoints);
free(cpuisectPoints);
hipFree(nw);
hipFree(ne);
hipFree(sw);
hipFree(se);
hipFree(scenelights);
}
| 98f27e97a5a420686b98344d07553d8d91c0df43.cu | //
// Ray.cpp
// RayTracer
//
// Created by Bo Miller on 1/2/19.
// Copyright © 2019 Bo Miller. All rights reserved.
//LDFLAGS= -L./glm/glm -glm
#include "glm/glm/glm.hpp"
#include "glm/glm/gtx/io.hpp"
#include <iostream>
#include "Ray.hpp"
#include <vector>
#include <atomic>
#include <mutex>
#include <math.h>
#define PI 3.14159265359
#include <pthread.h>
#include <chrono>
#include "bvh.hpp"
#include <random>
#include <queue>
#include "isect.hpp"
float RAY_EPSILON = 0.000000001;
int antialiasing = 0;
int numBounces = 2;
int SampPerPix = 4;
__global__ void GeneratePrimaryRays(Ray* rays, int n, glm::vec3 L, glm::vec3 u, glm::vec3 v, glm::vec3 cameraPosition)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
int ipix = i%720;
int jpix = i/720;
glm::vec3 pix = (L+u*float(ipix)+v*float(jpix));
float xoffset = 0;
float yoffset = 0;
glm::vec3 sample = glm::normalize(glm::vec3(pix[0]+xoffset,pix[1]+yoffset,pix[2])-cameraPosition);
rays[i].raytype = 0;
rays[i].position = cameraPosition;
rays[i].direction = sample;
rays[i].i = ipix;
rays[i].j = jpix;
rays[i].color = glm::vec3(0,0,0);
rays[i].surfaceReflectiveCoef = glm::vec3(0,0,0);
}
}
__device__ bool boundingBoxIntersection(glm::vec3& position, glm::vec3& direction, Node* node)
{
float tmin = (node->minX-position[0])/direction[0];
float tmax = (node->maxX-position[0])/direction[0];
if(tmin>tmax)
{
float temp = tmin;
tmin = tmax;
tmax = temp;
}
float tymin = (node->minY-position[1])/direction[1];
float tymax = (node->maxY-position[1])/direction[1];
if(tymin>tymax)
{
float temp = tymin;
tymin = tymax;
tymax = temp;
}
if((tmin > tymax) || (tymin > tmax))
return false;
if (tymin > tmin)
tmin = tymin;
if (tymax < tmax)
tmax = tymax;
float tzmin = (node->minZ-position[2])/direction[2];
float tzmax = (node->maxZ-position[2])/direction[2];
if (tzmin > tzmax)
{
float temp = tzmin;
tzmin = tzmax;
tzmax = temp;
}
if ((tmin > tzmax) || (tzmin > tmax))
return false;
if (tzmin > tmin)
tmin = tzmin;
if (tzmax < tmax)
tmax = tzmax;
return true;
}
__device__ bool intersectSphere(SceneObject& s, glm::vec3& position, glm::vec3& direction, float& isectT, glm::vec3& normal, glm::vec3& intersection)
{
float RAY_EPSILON = 0.000000001;
float a = glm::dot(direction, direction);
float b = 2 * glm::dot(direction,position-s.position);
float c = glm::dot(s.position,s.position) + glm::dot(position,position) + (-2 * glm::dot(s.position,position)) - pow(s.radius,2);
float discriminant = b*b - 4*a*c;
if(discriminant > 0.0+RAY_EPSILON)
{
float t = (-b - sqrt(discriminant))/(2*a);
float t2 = (-b + sqrt(discriminant))/(2*a);
if(t2>RAY_EPSILON)
{
//we know we have some intersection
if( t > RAY_EPSILON )
{
isectT = t;
}
else
{
isectT = t2;
}
intersection = position+t*direction;
normal = glm::normalize((intersection-s.position)/s.radius);
return true;
}
}
return false;
}
__device__ bool intersectTriangle(SceneObject& s, glm::vec3& position, glm::vec3& direction, glm::vec3& n, glm::vec3& intersection, float& time)
{
glm::vec3 normal = glm::normalize(glm::cross((s.v2-s.v1),(s.v3-s.v1)));
float denom = glm::dot(normal,direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((s.v1-position),normal)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = position+t*direction;
float test1 = glm::dot(glm::cross((s.v2-s.v1),(intersect-s.v1)),normal);
float test2 = glm::dot(glm::cross((s.v3-s.v2),(intersect-s.v2)),normal);
float test3 = glm::dot(glm::cross((s.v1-s.v3),(intersect-s.v3)),normal);
if(test1 >= 0.0 && test2 >= 0.0 && test3 >= 0.0)
{
glm::vec3 v0 = s.v2 - s.v1;
glm::vec3 v1 = s.v3 - s.v1;
glm::vec3 v2 = intersect - s.v1;
float d00 = glm::dot(v0, v0);
float d01 = glm::dot(v0, v1);
float d11 = glm::dot(v1, v1);
float d20 = glm::dot(v2, v0);
float d21 = glm::dot(v2, v1);
float denom = d00 * d11 - d01 * d01;
float v = (d11 * d20 - d01 * d21) / denom;
float w = (d00 * d21 - d01 * d20) / denom;
float u = 1.0f - v - w;
n = glm::normalize(u*s.v1Norm+v*s.v2Norm+w*s.v3Norm);
//n = -normal;
intersection = intersect;
time = t;
return true;
}
}
}
return false;
}
__device__ void bvhTraverse(glm::vec3& position, glm::vec3& direction, Node* currentNode,bool& intersect,float& minT, SceneObject& intersectObj, glm::vec3& minTnormal, glm::vec3& minTintersection)
{
if(currentNode->isleaf)
{
if(boundingBoxIntersection(position, direction, currentNode))
{
for(int i = 0; i<currentNode->numObjs;i++)
{
if(currentNode->objs[i]->sphere)
{
float iTime;
glm::vec3 normal;
glm::vec3 intersection;
if(intersectSphere(*currentNode->objs[i], position, direction, iTime, normal, intersection))
{
if(iTime<minT)
{
minTnormal = normal;
minTintersection = intersection;
intersectObj = *currentNode->objs[i];
minT = iTime;
intersect = true;
}
}
}
else if(currentNode->objs[i]->triangle)
{
float intersectT;
glm::vec3 normal;
glm::vec3 intersection;
if(intersectTriangle(*currentNode->objs[i], position, direction, normal, intersection, intersectT))
{
if(intersectT<minT)
{
minTnormal = normal;
minTintersection = intersection;
intersectObj = *currentNode->objs[i];
minT = intersectT;
intersect = true;
}
}
}
}
}
}
else
{
if(boundingBoxIntersection(position, direction, currentNode->left))
bvhTraverse(position, direction,currentNode->left,intersect,minT,intersectObj,minTnormal,minTintersection);
if(boundingBoxIntersection(position, direction, currentNode->right))
bvhTraverse(position, direction,currentNode->right,intersect,minT,intersectObj,minTnormal,minTintersection);
}
}
__device__ bool wallIntersection(Isect& ipoint, Ray& r, Ray& reflect, Node* root)
{
float x = 200.0f;
float y = 200.0f;
float z = 300.0f;
glm::vec3 up = glm::vec3(0,1,0);
float denom = glm::dot(up,r.direction);
if(fabsf(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,-y,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]<z && intersect[0]>-x && intersect[0] < x)
{
if(r.raytype == 0)
{
                    ipoint.color = 0.2f * glm::vec3(1.0, 0.2, 0.2);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef* glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(1.0, 0.2, 0.2);
ipoint.ambient = glm::vec3(1.0, 0.2, 0.2);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//left wall
up = glm::vec3(1,0,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(-x,0,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]< z && intersect[1] < y && intersect[1] > -y)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.2, 0.2, 1.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.2, 0.2, 1.0);
ipoint.ambient = glm::vec3(0.2, 0.2, 1.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//front wall, green wall in front of camera
up = glm::vec3(0,0,1);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,0,-z)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[0] > -x && intersect[0] < x && intersect[1] < y && intersect[1] > -y )
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.0, 1.0, 0.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.0, 1.0, 0.0);
ipoint.ambient = glm::vec3(0.0, 1.0, 0.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//back wall, yellow wall behind camera
up = glm::vec3(0,0,-1);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,0,z)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[0] > -x && intersect[0] < x && intersect[1] < y && intersect[1] > -y )
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(1.0, 1.0, 0.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(1.0, 1.0, 0.0);
ipoint.ambient = glm::vec3(1.0, 1.0, 0.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//right wall
up = glm::vec3(-1,0,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(x,0,0)-r.position),up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2]>-z && intersect[2]<z && intersect[1] < y && intersect[1] > -y)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(0.2, 0.2, 1.0);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(0.2, 0.2, 1.0);
ipoint.ambient = glm::vec3(0.2, 0.2, 1.0);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
//ceiling
up = glm::vec3(0,-1,0);
denom = glm::dot(up,r.direction);
if(abs(denom) > .0001f)
{
float t = glm::dot((glm::vec3(0,y,0)-r.position), up)/denom;
if(t >= 0.0-.0001f)
{
glm::vec3 intersect = r.position+t*r.direction;
if(intersect[2] > -z && intersect[2] < z && intersect[0] > -x && intersect[0] < x)
{
if(r.raytype == 0)
{
ipoint.color = 0.2f * glm::vec3(1, 1, 1);
reflect.surfaceReflectiveCoef = glm::vec3(0.0,0.0,0.0);
ipoint.reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
}
else if(r.raytype == 1){
ipoint.reflectionCoef = r.surfaceReflectiveCoef;
reflect.surfaceReflectiveCoef = r.surfaceReflectiveCoef*glm::vec3(0.0,0.0,0.0);
}
ipoint.normal = up;
ipoint.isectPoint = intersect;
ipoint.incidentDirection = glm::normalize(glm::reflect(r.direction, up));
ipoint.diffuse = glm::vec3(.9, .9, .9);
ipoint.ambient = glm::vec3(1, 1,1);
ipoint.specular = glm::vec3(0.0,0.0,0.0);
ipoint.shininess = 2;
ipoint.reflective = glm::vec3(0.0,0.0,0.0);
return true;
}
}
}
r.color = glm::vec3(0,0,0);
return false;
}
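//for each ray: traverse the BVH for the nearest hit, record the surface
//attributes in isectPoints[i] and spawn a reflected ray offset along the hit
//normal; rays that miss the BVH are tested against the six room walls instead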
__global__ void RayIntersection(Ray* rays, int n, Ray* reflectedRays, Node* bvhhead, Isect* isectPoints, int* nw, int* ne, int* sw, int* se)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
glm::vec3 direction = rays[i].direction;
glm::vec3 position = rays[i].position;
reflectedRays[i].i = rays[i].i;
reflectedRays[i].j = rays[i].j;
reflectedRays[i].color = rays[i].color;
reflectedRays[i].raytype = 1;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool intersect = false;
bvhTraverse(position,direction,bvhhead,intersect,minT,intersectObj,minTnormal,minTintersection);
if(intersect)
{
if(rays[i].raytype == 0)
{
isectPoints[i].color = glm::vec3(0,0,0);
reflectedRays[i].surfaceReflectiveCoef = intersectObj.reflective;
isectPoints[i].reflectionCoef = glm::vec3(1.0f,1.0f,1.0f);
isectPoints[i].color = 0.2f * intersectObj.ambient;
}
else if(rays[i].raytype == 1){
isectPoints[i].reflectionCoef = rays[i].surfaceReflectiveCoef;
reflectedRays[i].surfaceReflectiveCoef = rays[i].surfaceReflectiveCoef*intersectObj.reflective;
}
minTintersection = minTintersection+minTnormal*0.7f;
reflectedRays[i].position = minTintersection;
reflectedRays[i].direction = glm::normalize(glm::reflect(rays[i].direction, minTnormal));
isectPoints[i].isectPoint = minTintersection;
isectPoints[i].incidentDirection = rays[i].direction;
isectPoints[i].normal = minTnormal;
isectPoints[i].i = rays[i].i;
isectPoints[i].j = rays[i].j;
isectPoints[i].diffuse = intersectObj.diffuse;
isectPoints[i].ambient = intersectObj.ambient;
isectPoints[i].shininess = intersectObj.shininess;
isectPoints[i].specular = intersectObj.specular;
isectPoints[i].reflective = intersectObj.reflective;
}
else
{
Isect point;
point.color = isectPoints[i].color;
wallIntersection(point,rays[i],reflectedRays[i],bvhhead);
isectPoints[i] = point;
reflectedRays[i].position = point.isectPoint;
reflectedRays[i].direction = glm::normalize(glm::reflect(rays[i].direction, point.normal));
isectPoints[i].i = rays[i].i;
isectPoints[i].j = rays[i].j;
}
/*if(reflectedRays[i].direction[0] <= 0.0f && reflectedRays[i].direction[1] >= 0.0f)
{
atomicAdd(nw,1);
}
else if(reflectedRays[i].direction[0] >= 0.0f && reflectedRays[i].direction[1] >= 0.0f)
{
atomicAdd(ne,1);
}
else if(reflectedRays[i].direction[0] <= 0.0f && reflectedRays[i].direction[1] <= 0.0f)
{
atomicAdd(sw,1);
}
else if(reflectedRays[i].direction[0] >= 0.0f && reflectedRays[i].direction[1] <= 0.0f)
{
atomicAdd(se,1);
}*/
}
}
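//Phong-style shading: for every intersection point, accumulate diffuse and
//specular contributions from each light, attenuating point lights by distance
//and casting a shadow ray through the BVH; the result is scaled by the
//accumulated reflection coefficient and added to the pixel color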
__global__ void Shade(Isect* isectPoints, int n, Light* lights, int numlights, Node* bvhhead)
{
float RAY_EPSILON = 0.000000001;
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for(int i = index; i < n; i+=stride)
{
glm::vec3 color;
glm::vec3 direction = glm::normalize(isectPoints[i].incidentDirection);
glm::vec3 intersection = isectPoints[i].isectPoint;
glm::vec3 normal = isectPoints[i].normal;
float distance;
glm::vec3 toLight;
glm::vec3 reflectFromLight;
for(int j =0; j<numlights; j++)
{
Light l = lights[j];
/*if(l.area)
{
glm::vec3 avgcolor;
int lightSamples = 30;
for(int i = 0 ;i<lightSamples;i++)
{
float radius = curand_uniform(threadIdx.x*blockIdx.x)*l.radius;
float theta = curand_uniform(threadIdx.x*blockIdx.x)*(2*PI);
float x = radius * cos(theta);
float z = radius * sin(theta);
l.position[0] += x;
l.position[2] += z;
toLight = glm::normalize(l.position-intersection);
reflectFromLight = -toLight;
distance = 1.0f;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
intersection = intersection + .01f * normal;
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
if(minT>RAY_EPSILON)
{
glm::vec3 ipoint = intersection+minT*toLight;
float dtoLight = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
float dtoLightIntersection = sqrt(pow(ipoint[0]-intersection[0],2)+pow(ipoint[1]-intersection[1],2)+pow(ipoint[2]-intersection[2],2));
if(dtoLight>dtoLightIntersection)
distance = distance * 0;
}
}
avgcolor += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
color += avgcolor/(float)lightSamples;
}*/
if(l.point)
{
float d = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
distance = 1.0f/(l.constantTerm + l.linearTerm * d + l.quadraticTerm * pow(d,2));
if(distance>1.5)
distance = .5;
toLight = glm::normalize(l.position-intersection);
reflectFromLight = -toLight;
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
intersection = intersection + .01f * normal;
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
glm::vec3 ipoint = intersection+minT*toLight;
float dtoLight = sqrt(pow(intersection[0]-l.position[0],2)+pow(intersection[1]-l.position[1],2)+pow(intersection[2]-l.position[2],2));
float dtoLightIntersection = sqrt(pow(ipoint[0]-intersection[0],2)+pow(ipoint[1]-intersection[1],2)+pow(ipoint[2]-intersection[2],2));
if(dtoLight>dtoLightIntersection)
distance = distance * 0;
}
color += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
else
{
distance = 1.0f;
toLight = -glm::normalize(l.direction);
reflectFromLight = glm::normalize(l.direction);
float minT = 1000000000;
SceneObject intersectObj;
glm::vec3 minTnormal;
glm::vec3 minTintersection;
bool shadow = false;
                //check if the light and the surface normal are facing different directions
float dotP = glm::dot(reflectFromLight,normal);
if(dotP > -0.00001 )
shadow = true;
else
bvhTraverse(intersection,toLight,bvhhead,shadow,minT,intersectObj,minTnormal,minTintersection);
if(shadow)
{
if(minT>RAY_EPSILON)
distance = distance * 0;
}
color += distance * l.color * ( .6f * isectPoints[i].diffuse * glm::max(glm::dot(toLight,normal),0.0f) + .2f * isectPoints[i].specular * glm::pow(glm::dot(glm::reflect(reflectFromLight, normal), -direction),isectPoints[i].shininess));
}
}
isectPoints[i].color += isectPoints[i].reflectionCoef * color;
}
}
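//host entry point: allocates managed buffers, generates one primary ray per
//pixel, alternates RayIntersection and Shade for numBounces bounces, then
//clamps the accumulated colors to [0,255] and writes them into
//pixelcolorBuffer with the channels reversed (BGR)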
void startRayTracing(float width, float height, unsigned char*& pixelcolorBuffer,glm::vec3 cameraPosition, glm::vec3 cameraDirection, std::vector<SceneObject*>& scene, std::vector<Light>& lights, Node* rootnode)
{
//set the stack size for threads
size_t limit = 4096;
cudaDeviceSetLimit(cudaLimitStackSize,limit);
Light* scenelights;
int numlights = (int)lights.size();
cudaMallocManaged(&scenelights,numlights*sizeof(Light));
for(int i = 0; i<numlights; i++)
{
scenelights[i] = lights[i];
}
int totalRaysInSystem = width*height;
    //for primary ray calculations
glm::vec3 n = glm::normalize(cameraPosition-cameraDirection);
glm::vec3 u = glm::normalize(glm::cross(glm::vec3(0,1,0),n));
glm::vec3 v = glm::cross(n,u);
float fov = 45/(180.0 / PI);
float d = (height/tan(fov/2))/2;
glm::vec3 L = (cameraPosition-n*d) - u * (width/2) - v*(height/2);
//generate primary rays
Ray *cudarays;
cudaMallocManaged(&cudarays,totalRaysInSystem*sizeof(Ray));
int blockSize = 256;
int numBlocks = (totalRaysInSystem + blockSize -1)/blockSize;
GeneratePrimaryRays<<<numBlocks,blockSize>>>(cudarays,totalRaysInSystem, L, u, v, cameraPosition);
cudaDeviceSynchronize();
Ray *reflectedRays;
cudaMallocManaged(&reflectedRays,totalRaysInSystem*sizeof(Ray));
    Isect *cpuisectPoints = (Isect *)malloc(totalRaysInSystem*sizeof(Isect));
Isect *isectPoints;
cudaMallocManaged(&isectPoints,totalRaysInSystem*sizeof(Isect));
/*USED FOR RAY SORTING NOT IMPLEMENTED YET*/
int *nw,*ne,*sw,*se;
cudaMallocManaged(&nw,sizeof(int));
cudaMallocManaged(&ne,sizeof(int));
cudaMallocManaged(&sw,sizeof(int));
cudaMallocManaged(&se,sizeof(int));
//******************************************
for(int i =0; i < numBounces; i++)
{
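        //one bounce: trace the current rays, shade the resulting hits, copy the
        //shaded intersections back to the host, then promote the reflected rays
        //to be the input rays of the next bounce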
RayIntersection<<<numBlocks,blockSize>>>(cudarays,totalRaysInSystem,reflectedRays,rootnode,isectPoints,nw,ne,sw,se);
cudaDeviceSynchronize();
Shade<<<numBlocks, blockSize>>>(isectPoints,totalRaysInSystem,scenelights,numlights,rootnode);
cudaDeviceSynchronize();
cudaMemcpy(cpuisectPoints, isectPoints, totalRaysInSystem*sizeof(Isect),cudaMemcpyDeviceToHost);
cudaMemcpy(cudarays, reflectedRays, totalRaysInSystem*sizeof(Ray),cudaMemcpyDeviceToDevice);
}
for(int i = 0; i<totalRaysInSystem;i++)
{
cpuisectPoints[i].color = cpuisectPoints[i].color*255.0f;
cpuisectPoints[i].color = glm::clamp(cpuisectPoints[i].color,glm::vec3(0.0f,0.0f,0.0f),glm::vec3(255.0f,255.0f,255.0f));
pixelcolorBuffer[i*3] = cpuisectPoints[i].color[2];
pixelcolorBuffer[i*3+1] = cpuisectPoints[i].color[1];
pixelcolorBuffer[i*3+2] = cpuisectPoints[i].color[0];
}
cudaFree(cudarays);
cudaFree(reflectedRays);
cudaFree(isectPoints);
free(cpuisectPoints);
cudaFree(nw);
cudaFree(ne);
cudaFree(sw);
cudaFree(se);
cudaFree(scenelights);
}
|
fabee59fc41d48607cbad1727943e37739aa801d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_minus_4_left [3][2];
static int dims_update_halo_kernel2_xvel_minus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_xvel_minus_4_left_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = -xvel0(4,0,0);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = -xvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_xvel_minus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[0][0] * dims_update_halo_kernel2_xvel_minus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[1][0] * dims_update_halo_kernel2_xvel_minus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_minus_4_left[0][0], dims_update_halo_kernel2_xvel_minus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_minus_4_left[1][0], dims_update_halo_kernel2_xvel_minus_4_left[1][1], arg1);
update_halo_kernel2_xvel_minus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,28)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28,"update_halo_kernel2_xvel_minus_4_left");
OPS_kernels[28].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_minus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_minus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_minus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_minus_4_left_h[1][1]) {
dims_update_halo_kernel2_xvel_minus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_minus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_minus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_minus_4_left_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_xvel_minus_4_left, dims_update_halo_kernel2_xvel_minus_4_left_h, sizeof(dims_update_halo_kernel2_xvel_minus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[28].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_xvel_minus_4_left), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[28].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[28].mpi_time += t2-t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 28;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 28;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_minus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(28,"update_halo_kernel2_xvel_minus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
| fabee59fc41d48607cbad1727943e37739aa801d.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_xvel_minus_4_left [3][2];
static int dims_update_halo_kernel2_xvel_minus_4_left_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_xvel_minus_4_left_gpu(ACC<double> &xvel0,
ACC<double> &xvel1,
const int* fields)
{
if(fields[FIELD_XVEL0] == 1) xvel0(0,0,0) = -xvel0(4,0,0);
if(fields[FIELD_XVEL1] == 1) xvel1(0,0,0) = -xvel1(4,0,0);
}
__global__ void ops_update_halo_kernel2_xvel_minus_4_left(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[0][0] * dims_update_halo_kernel2_xvel_minus_4_left[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_xvel_minus_4_left[1][0] * dims_update_halo_kernel2_xvel_minus_4_left[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_xvel_minus_4_left[0][0], dims_update_halo_kernel2_xvel_minus_4_left[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_xvel_minus_4_left[1][0], dims_update_halo_kernel2_xvel_minus_4_left[1][1], arg1);
update_halo_kernel2_xvel_minus_4_left_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,28)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(28,"update_halo_kernel2_xvel_minus_4_left");
OPS_kernels[28].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_xvel_minus_4_left_h[0][0] || ydim0 != dims_update_halo_kernel2_xvel_minus_4_left_h[0][1] || xdim1 != dims_update_halo_kernel2_xvel_minus_4_left_h[1][0] || ydim1 != dims_update_halo_kernel2_xvel_minus_4_left_h[1][1]) {
dims_update_halo_kernel2_xvel_minus_4_left_h[0][0] = xdim0;
dims_update_halo_kernel2_xvel_minus_4_left_h[0][1] = ydim0;
dims_update_halo_kernel2_xvel_minus_4_left_h[1][0] = xdim1;
dims_update_halo_kernel2_xvel_minus_4_left_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_xvel_minus_4_left, dims_update_halo_kernel2_xvel_minus_4_left_h, sizeof(dims_update_halo_kernel2_xvel_minus_4_left)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[28].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_xvel_minus_4_left<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[28].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[28].mpi_time += t2-t1;
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[28].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_xvel_minus_4_left(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 28;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 28;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_xvel_minus_4_left_execute;
if (OPS_diags > 1) {
ops_timing_realloc(28,"update_halo_kernel2_xvel_minus_4_left");
}
ops_enqueue_kernel(desc);
}
#endif
|
f431d954ab870249649c7e74f9ededd170f6d77f.hip | // !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.h"
#include "tsvd_c.h"
namespace ML {
using namespace MLCommon;
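// Each wrapper below creates the library handles and stream it needs, calls the
// corresponding implementation that takes them explicitly, and destroys them
// again before returning.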
void tsvdFit(float *input, float *components, float *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdFit(double *input, double *components, double *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdFitTransform(float *input, float *trans_input, float *components,
float *explained_var, float *explained_var_ratio, float *singular_vals,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdFitTransform(double *input, double *trans_input, double *components,
double *explained_var, double *explained_var_ratio,
double *singular_vals, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipsolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(hipsolverDnCreate(&cusolver_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUSOLVER_CHECK(hipsolverDnDestroy(cusolver_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdTransform(float *input, float *components, float *trans_input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdTransform(input, components, trans_input, prms, cublas_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdTransform(double *input, double *components, double *trans_input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdTransform(input, components, trans_input, prms, cublas_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdInverseTransform(float *trans_input, float *components, float *input,
paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
void tsvdInverseTransform(double *trans_input, double *components,
double *input, paramsTSVD prms) {
hipblasHandle_t cublas_handle;
CUBLAS_CHECK(hipblasCreate(&cublas_handle));
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle, stream);
CUBLAS_CHECK(hipblasDestroy(cublas_handle));
CUDA_CHECK(hipStreamDestroy(stream));
}
/** @} */
} // end namespace ML
| f431d954ab870249649c7e74f9ededd170f6d77f.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "tsvd.h"
#include "tsvd_c.h"
namespace ML {
using namespace MLCommon;
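// Each wrapper below creates the library handles and stream it needs, calls the
// corresponding implementation that takes them explicitly, and destroys them
// again before returning.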
void tsvdFit(float *input, float *components, float *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdFit(double *input, double *components, double *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdFit(input, components, singular_vals, prms, cublas_handle, cusolver_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdFitTransform(float *input, float *trans_input, float *components,
float *explained_var, float *explained_var_ratio, float *singular_vals,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdFitTransform(double *input, double *trans_input, double *components,
double *explained_var, double *explained_var_ratio,
double *singular_vals, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cusolverDnHandle_t cusolver_handle = NULL;
CUSOLVER_CHECK(cusolverDnCreate(&cusolver_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdFitTransform(input, trans_input, components, explained_var,
explained_var_ratio, singular_vals, prms, cublas_handle,
cusolver_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUSOLVER_CHECK(cusolverDnDestroy(cusolver_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdTransform(float *input, float *components, float *trans_input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdTransform(input, components, trans_input, prms, cublas_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdTransform(double *input, double *components, double *trans_input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdTransform(input, components, trans_input, prms, cublas_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdInverseTransform(float *trans_input, float *components, float *input,
paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
void tsvdInverseTransform(double *trans_input, double *components,
double *input, paramsTSVD prms) {
cublasHandle_t cublas_handle;
CUBLAS_CHECK(cublasCreate(&cublas_handle));
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
tsvdInverseTransform(trans_input, components, input, prms, cublas_handle, stream);
CUBLAS_CHECK(cublasDestroy(cublas_handle));
CUDA_CHECK(cudaStreamDestroy(stream));
}
/** @} */
} // end namespace ML
|
077736300c33b06cc2a5c22c07481f65dc49f37b.hip | // !!! This is a file automatically generated by hipify!!!
#include <drivers/adam_driver.h>
#include <solvers/adam.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <functions/dev_initializations.h>
#include <utilities/print_utils.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
void initAdamParams( ADAM_PARAMS *params, int n )
{
    //default hyper-parameters (see sampled_tr_cg.m)
params->step= 0.001; //learning rate
params->beta1 = 0.9;
params->beta2 = 0.999;
params->eps = 1e-8; //eps
params->lambda = 0;
params->maxProps = ULONG_MAX;
params->maxEpochs = 20;
params->sampleSize = floor( 256 );
}
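//zero-initializes the device weights and runs the Adam solver with the
//defaults set above; the random-initialization path is left commented out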
void testAdam (NN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch ) {
ADAM_PARAMS mParams;
//begin here
fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n");
initAdamParams( &mParams, data->trainSizeX );
fprintf( stderr, "... Done parms initialization \n\n");
//init weights to ZEROS
cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );
//init weights to Random Vector
/*
getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
ERROR_MEMCPY_DEVICE_DEVICE );
real scale = 0.25;
cublasCheckError( hipblasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
*/
adam ( model, data, scratch, &mParams );
fprintf( stderr, ".... Done testing of Adam \n\n\n" );
}
| 077736300c33b06cc2a5c22c07481f65dc49f37b.cu |
#include <drivers/adam_driver.h>
#include <solvers/adam.h>
#include <core/errors.h>
#include <device/cuda_utils.h>
#include <device/gen_random.h>
#include <device/device_defines.h>
#include <device/handles.h>
#include <functions/dev_initializations.h>
#include <utilities/print_utils.h>
#include <limits.h>
#include <stdlib.h>
#include <stdio.h>
#include <float.h>
void initAdamParams( ADAM_PARAMS *params, int n )
{
    //default hyper-parameters (see sampled_tr_cg.m)
params->step= 0.001; //learning rate
params->beta1 = 0.9;
params->beta2 = 0.999;
params->eps = 1e-8; //eps
params->lambda = 0;
params->maxProps = ULONG_MAX;
params->maxEpochs = 20;
params->sampleSize = floor( 256 );
}
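//zero-initializes the device weights and runs the Adam solver with the
//defaults set above; the random-initialization path is left commented out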
void testAdam (NN_MODEL *model, DEVICE_DATASET *data,
SCRATCH_AREA *scratch ) {
ADAM_PARAMS mParams;
//begin here
fprintf( stderr, "Initiating the Trust Region Test now..... \n\n\n");
initAdamParams( &mParams, data->trainSizeX );
fprintf( stderr, "... Done parms initialization \n\n");
//init weights to ZEROS
cuda_memset( data->weights, 0, sizeof(real) * model->pSize, ERROR_MEMSET );
//init weights to Random Vector
/*
getRandomVector( model->pSize, NULL, scratch->nextDevPtr, RAND_NORMAL );
copy_device( data->weights, scratch->nextDevPtr, sizeof(real) * model->pSize,
ERROR_MEMCPY_DEVICE_DEVICE );
real scale = 0.25;
cublasCheckError( cublasDscal( cublasHandle, model->pSize, &scale, data->weights, 1 ));
*/
adam ( model, data, scratch, &mParams );
fprintf( stderr, ".... Done testing of Adam \n\n\n" );
}
|
0330ebd32e9b409dd15fe058aafac7236f8aa255.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kernel_push_stochastic1(int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int *g_block_num, int width1)
{
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int thid = __umul24(y, width1) + x;
s_push_reser[thid] = g_push_reser[thid];
if (thid == 0)
{
if ((*g_count_blocks) == 0)
(*g_finish) = false;
}
} | 0330ebd32e9b409dd15fe058aafac7236f8aa255.cu | #include "includes.h"
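// Copies each element of g_push_reser into the s_push_reser staging buffer;
// thread 0 resets the global finish flag to false once *g_count_blocks has
// reached zero.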
__global__ void kernel_push_stochastic1(int *g_push_reser, int *s_push_reser, int *g_count_blocks, bool *g_finish, int *g_block_num, int width1)
{
int x = __umul24(blockIdx.x, blockDim.x) + threadIdx.x;
int y = __umul24(blockIdx.y, blockDim.y) + threadIdx.y;
int thid = __umul24(y, width1) + x;
s_push_reser[thid] = g_push_reser[thid];
if (thid == 0)
{
if ((*g_count_blocks) == 0)
(*g_finish) = false;
}
} |
74e29bd866cd6a9d1c681dabbbd37982b6d0fd7a.hip | // !!! This is a file automatically generated by hipify!!!
#include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include <ATen/Context.h>
#include <c10/hip/HIPFunctions.h>
#include <ATen/hip/impl/HIPCachingAllocatorMasqueradingAsCUDA.h>
#include "hipcub/hipcub.hpp"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
C10_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
C10_DEFINE_int(
caffe2_cub_bin_growth,
8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
C10_DEFINE_int(
caffe2_cub_min_bin,
3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_bin,
10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_managed_mb,
10 * 1024,
"If using cub as the memory allocators, sets the maximum amount "
"of memory managed in gigabytes");
C10_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
C10_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
C10_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace at {
REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext);
} // namespace at
namespace caffe2 {
// Generic implementation - CUDA will handle the right function to call for us
void CUDAContext::CopyBytesAsync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// TODO: verify that the CUDA handles copy from device to device correctly
// even without SetDevice()
// TODO: verify whether source or dest device should be a priority in picking
// the stream
// NB: right now the cross-device copy logic is invoked only in the contexts
// when surrounding code explicitly manages data dependencies and sets up
// events, so it's fine. In order to make it a standalone function proper
// synchronization between stream is required
int gpu_id = 0;
if (dst_device.is_cuda()) {
gpu_id = dst_device.index();
} else if (src_device.is_cuda()) {
gpu_id = src_device.index();
} else {
LOG(FATAL) << "shouldn't be called with non-cuda device";
}
CUDA_ENFORCE(hipMemcpyAsync(
dst,
src,
nbytes,
hipMemcpyDefault,
CUDAContext::getCudaObjects().GetStream(gpu_id)));
}
void CUDAContext::CopyBytesSync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// This emulates Caffe2 original behavior where sync copy doesn't change the
// device. It's probably better for clarity to switch to the target device
// explicitly here, but in the worst case CUDA would sync for us.
// TODO: change it to HIPGuardMasqueradingAsCUDA
CUDAContext context(-1); // take current device
CUDA_ENFORCE(hipMemcpyAsync(
dst, src, nbytes, hipMemcpyDefault, context.cuda_stream()));
// destructor of context synchronizes
}
// For the CPU context, we also allow a (probably expensive) function
// to copy the data from a cuda context. Inside the function, we create
// a temporary CUDAContext object to carry out the copy. From the caller's
// side, these functions are synchronous with respect to the host, similar
// to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call.
template <>
inline void CPUContext::CopyBytes<CUDAContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(src));
context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);
}
template <>
inline void CPUContext::CopyBytes<CPUContext, CUDAContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(dst));
context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
}
} // namespace caffe2
namespace caffe2 {
ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() {
static thread_local ThreadLocalCUDAObjects cuda_objects_;
return cuda_objects_;
}
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Meyers singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<hipcub::CachingDeviceAllocator> g_cub_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
C10_LOG_API_USAGE_ONCE("caffe2.init.cuda");
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
C10_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
C10_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile.");
for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) {
HIPGuardMasqueradingAsCUDA g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = ::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(hipDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for hipDeviceEnablePeerAccess that should always be
// zero currently.
// It is ok if peer access is already enabled...
hipError_t err = hipDeviceEnablePeerAccess(j, 0);
if ((err != hipErrorPeerAccessAlreadyEnabled) &&
(err != hipSuccess)) {
CAFFE_THROW(hipGetErrorString(err));
}
hipGetLastError(); // reset cuda error code
}
}
}
#ifdef CAFFE2_USE_CUDNN
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new hipcub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
// Initialize caching allocator
at::globalContext().lazyInitCUDA();
} else {
CAFFE_THROW(
"Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool);
}
}
/**
* An allocator that does the CPU memory allocation with pinned memory.
*
* This is needed because if we want to do any asynchronous cuda memcpy,
* the underlying CPU memory also needs to be allocated into pinned memory
* space. As a result, whenever Caffe2 is built with GPU and there is
* GPU present during runtime, at global initialization time we will set
* the CPU memory allocator to allocate pinned memory.
*
* NB: This behavior is probably too aggressive. We should consider asking users
* to do on-demand memory pinning (like exposed in PyTorch APIs) instead.
*/
struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator {
PinnedCPUAllocator() {
baseAllocator_ = GetDefaultCPUAllocator();
}
~PinnedCPUAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
if (nbytes == 0) {
// replicate c10::alloc_cpu behavior - return nullptr
return {nullptr, nullptr, &Delete, at::Device(CPU)};
}
void* data;
at::DataPtr data_ptr;
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (IsNUMAEnabled()) {
at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter();
data_ptr = baseAllocator_->allocate(nbytes);
data = data_ptr.get();
CAFFE_ENFORCE(data);
CUDA_ENFORCE(hipHostRegister(data, nbytes, hipHostRegisterDefault));
CAFFE_ENFORCE(
data_ptr.compare_exchange_deleter(expected_deleter, &Delete),
"Failed to swap deleter (already swapped?)");
} else {
CUDA_ENFORCE(hipHostMalloc(&data, nbytes));
profiledCPUMemoryReporter().New(data, nbytes);
data_ptr = {data, data, &Delete, at::Device(CPU)};
}
memset(data, 0, nbytes);
return data_ptr;
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* data) {
if (!data) {
return;
}
// Caffe2 uses a lazy way to figure out if one is actually going to use GPUs
// or not. If a CUDAContext::New() call is made, inside the CUDAContext
// function we will switch the cpu side allocator to a PinnedCPUAllocator.
// But, if one calls CPUContext::New() before any cuda allocations,
// PinnedCPUAllocator can still delete the corresponding memory.
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (IsNUMAEnabled()) {
CUDA_ENFORCE(hipHostUnregister(data));
GetDefaultCPUAllocator()->raw_deleter()(data);
} else {
hipError_t err = hipHostFree(data);
profiledCPUMemoryReporter().Delete(data);
if (err == hipErrorInvalidValue) {
free(data);
// Calling hipGetLastError will reset the cuda error.
hipError_t _err = hipGetLastError();
} else {
// For all other errors, still do a cuda check.
CUDA_ENFORCE(err);
}
}
}
at::Allocator* baseAllocator_;
};
static PinnedCPUAllocator g_pinned_cpu_alloc;
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if C10_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"hipHostMalloc. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
// If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator
// will cause memory corruptions. Therefore, we need to set the priority
// to highest to avoid being overwritten.
SetCPUAllocator(
&g_pinned_cpu_alloc,
std::numeric_limits<uint8_t>::max() /* priority */);
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(DeviceIndex gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_device_id() ? RectifyGPUID(option.device_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), PROTO_CUDA);
}
CUDAContext::~CUDAContext() {
try {
if (curand_generator_) {
CURAND_CHECK(hiprandDestroyGenerator(curand_generator_));
}
// CUDAContext is used in 2 cases now:
// - long-lived instance inside OperatorBase in which case what happens in
// destructor doesn't really matter
// - short-lived on-the-fly instances that are utilized as HIPGuardMasqueradingAsCUDA - in
// this case there's only one stream id (passed to SwitchToDevice) and
// it's preferable to synchronize in the destructor
FinishDeviceComputation();
} catch (const std::exception& e) {
LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what();
}
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
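// TrackMemoryAlloc (below) bumps the per-GPU and global allocation counters and, once the
// total has grown by more than caffe2_gpu_memory_report_interval_mb since the last report,
// logs the current and peak usage of every GPU via VLOG(1).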
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
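// DefaultCUDAAllocator::allocate dispatches to plain hipMalloc, the CUB caching allocator,
// or the THC caching allocator depending on g_cuda_memory_pool_type, and records (where
// needed) which device a pointer came from so that Delete() can hand it back to the
// matching pool; the optional memory-tracking bookkeeping lives in g_size_map and
// g_cuda_device_affiliation.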
struct DefaultCUDAAllocator final : public at::Allocator {
DefaultCUDAAllocator() {}
~DefaultCUDAAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
if (nbytes != 0) {
CUDA_ENFORCE(hipMalloc(&ptr, nbytes));
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::CUB:
if (nbytes != 0) {
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
}
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::THC:
{
// The reason we have this stream guard here is to preserve
// the historical behavior of the 'thc' allocator in Caffe2,
// which is to put all allocations on the same (default)
// stream. This behavior is morally wrong (since passing
// allocations between streams allows for the possibility
// of you handing out some memory that an old stream
// is still working on), but it doesn't seem to cause issues
// in Caffe2 today. Our hypothesis for why this is the case
// is that Caffe2 doesn't really do very many allocations
// on the fly; instead they allocate once and then reuse
// the allocations for the whole program. In this case,
// the hazard is avoided.
//
// We intend to remove this stream guard, but the benefit
// to putting all allocations on the same stream is it
// reduces per-stream fragmentation, and this helps
// some models that are currently running with the thc
// allocator fit in memory. We will need to find some
// way of resolving this problem.
hip::HIPStreamGuardMasqueradingAsCUDA g(
Stream(
Stream::DEFAULT,
Device(kCUDA, CaffeCudaGetDevice())
));
ptr = hip::HIPCachingAllocator::raw_alloc(nbytes);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple hipFree.
hipError_t error = hipFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is
// exiting anyway, we will not need to worry about memory leak, so we
// basically ignore it. This is definitely not ideal but works for now.
if (error != hipSuccess && error != hipErrorDeinitialized) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< hipGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
hip::HIPCachingAllocator::raw_delete(ptr);
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
};
static DefaultCUDAAllocator g_cuda_alloc;
REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc);
} // namespace caffe2
namespace at {
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CPU,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CPU,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
} // namespace at
| 74e29bd866cd6a9d1c681dabbbd37982b6d0fd7a.cu | #include <algorithm>
#include <atomic>
#include <cstdlib>
#include <string>
#include <unordered_map>
#include <ATen/Context.h>
#include <c10/cuda/CUDAFunctions.h>
#include <c10/cuda/CUDACachingAllocator.h>
#include "cub/util_allocator.cuh"
// Needed to be included first to check the CAFFE2_USE_CUDNN macros.
#include "caffe2/core/macros.h"
#include "caffe2/core/blob_stats.h"
#ifdef CAFFE2_USE_CUDNN
#include "caffe2/core/common_cudnn.h"
#endif // CAFFE2_USE_CUDNN
#include "caffe2/core/context_gpu.h"
#include "caffe2/core/init.h"
#include "caffe2/core/logging.h"
#include "caffe2/core/tensor.h"
#include "caffe2/utils/string_utils.h"
C10_DEFINE_string(
caffe2_cuda_memory_pool,
"",
"Sets the memory pool used by caffe2. Possible values are "
"none, cnmem, thc and cub.");
// For description of CUB caching allocator configuration, see
// https://nvlabs.github.io/cub/structcub_1_1_caching_device_allocator.html
C10_DEFINE_int(
caffe2_cub_bin_growth,
8,
"If using cub as the memory allocator, sets the growth of bins "
"used by the cub pool.");
C10_DEFINE_int(
caffe2_cub_min_bin,
3,
"If using cub as the memory allocator, sets the min number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_bin,
10,
"If using cub as the memory allocator, sets the max number of "
"bins.");
C10_DEFINE_int(
caffe2_cub_max_managed_mb,
10 * 1024,
"If using cub as the memory allocators, sets the maximum amount "
"of memory managed in gigabytes");
C10_DEFINE_bool(
caffe2_cub_print_allocation_events,
false,
"If true CachingDeviceAllocator will print allocation and deallocation "
"events to stdout.");
C10_DEFINE_bool(
caffe2_gpu_memory_tracking,
false,
"If set, logs changes in GPU memory allocations");
C10_DEFINE_int(
caffe2_gpu_memory_report_interval_mb,
128,
"The threshold in MB on how frequently to report memory changes");
namespace at {
REGISTER_CONTEXT(DeviceType::CUDA, caffe2::CUDAContext);
} // namespace at
namespace caffe2 {
// Generic implementation - CUDA will handle the right function to call for us
void CUDAContext::CopyBytesAsync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// TODO: verify that the CUDA handles copy from device to device correctly
// even without SetDevice()
// TODO: verify whether source or dest device should be a priority in picking
// the stream
// NB: right now the cross-device copy logic is invoked only in the contexts
// when surrounding code explicitly manages data dependencies and sets up
// events, so it's fine. In order to make it a standalone function proper
// synchronization between stream is required
int gpu_id = 0;
if (dst_device.is_cuda()) {
gpu_id = dst_device.index();
} else if (src_device.is_cuda()) {
gpu_id = src_device.index();
} else {
LOG(FATAL) << "shouldn't be called with non-cuda device";
}
CUDA_ENFORCE(cudaMemcpyAsync(
dst,
src,
nbytes,
cudaMemcpyDefault,
CUDAContext::getCudaObjects().GetStream(gpu_id)));
}
void CUDAContext::CopyBytesSync(
size_t nbytes,
const void* src,
Device src_device,
void* dst,
Device dst_device) {
// This emulates Caffe2 original behavior where sync copy doesn't change the
// device. It's probably better for clarity to switch to the target device
// explicitly here, but in the worst case CUDA would sync for us.
// TODO: change it to CUDAGuard
CUDAContext context(-1); // take current device
CUDA_ENFORCE(cudaMemcpyAsync(
dst, src, nbytes, cudaMemcpyDefault, context.cuda_stream()));
// destructor of context synchronizes
}
// For the CPU context, we also allow a (probably expensive) function
// to copy the data from a cuda context. Inside the function, we create
// a temporary CUDAContext object to carry out the copy. From the caller's
// side, these functions are synchronous with respect to the host, similar
// to a normal CPUContext::CopyBytes<CPUContext, CPUContext> call.
template <>
inline void CPUContext::CopyBytes<CUDAContext, CPUContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(src));
context.CopyBytes<CUDAContext, CPUContext>(nbytes, src, dst);
}
template <>
inline void CPUContext::CopyBytes<CPUContext, CUDAContext>(
size_t nbytes,
const void* src,
void* dst) {
CUDAContext context(GetGPUIDForPointer(dst));
context.CopyBytes<CPUContext, CUDAContext>(nbytes, src, dst);
}
} // namespace caffe2
namespace caffe2 {
ThreadLocalCUDAObjects& CUDAContext::getCudaObjects() {
static thread_local ThreadLocalCUDAObjects cuda_objects_;
return cuda_objects_;
}
// TODO(jiayq): these variables shouldn't be currently accessed during static
// initialization. We should consider moving them to a Meyers singleton to
// be totally safe against SIOF.
// Static global variables for setting up the memory pool.
CudaMemoryPoolType g_cuda_memory_pool_type;
std::unique_ptr<cub::CachingDeviceAllocator> g_cub_allocator;
// an unordered map that holds the map from the cuda memory pointer to the
// device id that it is allocated from. This is used in the cuda memory pool
// cases, where we need the device id to carry out the deletion.
// Note(jiayq): an alternate approach is to use cudaGetPointerAttributes, but
// that is usually quite slow. We might want to benchmark the speed difference
// though.
// Note(jiayq): another alternate approach is to augment the Tensor class that
// would allow one to record the device id. However, this does not address any
// non-tensor allocation and deallocation.
// Ideally, a memory pool should already have the device id information, as
// long as we are using UVA (as of CUDA 5 and later) so the addresses are
// unique.
static std::unordered_map<void*, uint8_t> g_cuda_device_affiliation;
// Data structures for optional memory tracking. Access to these structures
// is guarded by the CUDAContext::mutex.
static std::unordered_map<void*, long> g_size_map;
static std::vector<long> g_total_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0);
static std::vector<long> g_max_by_gpu_map(C10_COMPILE_TIME_MAX_GPUS, 0);
static long g_total_mem = 0;
static long g_last_rep = 0;
CudaMemoryPoolType GetCudaMemoryPoolType() {
return g_cuda_memory_pool_type;
}
///////////////////////////////////////////////////////////////////////////////
// A wrapper to allow us to lazily initialize all cuda environments that Caffe
// uses. This gets done the first time a caffe2::CUDAContext::New() gets called
// which is probably the decisive indication that this caffe2 run is going to
// use GPUs. We avoid cuda initialization with core/init.h functionalities so
// that we have minimal resource impact in case we will need to run multiple
// caffe2 instances on a GPU machine.
///////////////////////////////////////////////////////////////////////////////
static void Caffe2InitializeCuda() {
// If the current run does not have any cuda devices, do nothing.
if (!HasCudaGPU()) {
VLOG(1) << "No cuda gpu present. Skipping.";
return;
}
C10_LOG_API_USAGE_ONCE("caffe2.init.cuda");
// Check if the number of GPUs matches the expected compile-time max number
// of GPUs.
CAFFE_ENFORCE_LE(
NumCudaDevices(),
C10_COMPILE_TIME_MAX_GPUS,
"Number of CUDA devices on the machine is larger than the compiled "
"max number of gpus expected (",
C10_COMPILE_TIME_MAX_GPUS,
"). Increase that and recompile.");
for (DeviceIndex i = 0; i < NumCudaDevices(); ++i) {
CUDAGuard g(i);
// Enable peer access.
const int peer_group = i / CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_start = peer_group * CAFFE2_CUDA_MAX_PEER_SIZE;
const int peer_end = std::min(
NumCudaDevices(), (peer_group + 1) * CAFFE2_CUDA_MAX_PEER_SIZE);
VLOG(1) << "Enabling peer access within group #" << peer_group
<< ", from gpuid " << peer_start << " to " << peer_end - 1
<< ", for gpuid " << i << ".";
for (int j = peer_start; j < peer_end; ++j) {
if (i == j) continue;
int can_access;
CUDA_ENFORCE(cudaDeviceCanAccessPeer(&can_access, i, j));
if (can_access) {
VLOG(1) << "Enabling peer access from " << i << " to " << j;
// Note: just for future reference, the 0 here is not a gpu id, it is
// a reserved flag for cudaDeviceEnablePeerAccess that should always be
// zero currently.
// It is ok if peer access is already enabled...
cudaError_t err = cudaDeviceEnablePeerAccess(j, 0);
if ((err != cudaErrorPeerAccessAlreadyEnabled) &&
(err != cudaSuccess)) {
CAFFE_THROW(cudaGetErrorString(err));
}
cudaGetLastError(); // reset cuda error code
}
}
}
#ifdef CAFFE2_USE_CUDNN
// Check the versions of cuDNN that were compiled and linked with are compatible
CheckCuDNNVersions();
#endif // CAFFE2_USE_CUDNN
}
static void SetUpCub() {
VLOG(1) << "Setting up cub memory pool.";
// Sets up the cub memory pool
try {
g_cub_allocator.reset(new cub::CachingDeviceAllocator(
FLAGS_caffe2_cub_bin_growth,
FLAGS_caffe2_cub_min_bin,
FLAGS_caffe2_cub_max_bin,
size_t(FLAGS_caffe2_cub_max_managed_mb) * 1024L * 1024L,
false,
FLAGS_caffe2_cub_print_allocation_events));
} catch (...) {
CAFFE_THROW("Some error happened at cub initialization.");
}
VLOG(1) << "Done setting up cub memory pool.";
}
static void Caffe2SetCUDAMemoryPool() {
if (FLAGS_caffe2_cuda_memory_pool == "" ||
FLAGS_caffe2_cuda_memory_pool == "none") {
g_cuda_memory_pool_type = CudaMemoryPoolType::NONE;
} else if (FLAGS_caffe2_cuda_memory_pool == "cnmem") {
CAFFE_THROW("CNMEM is no longer used by Caffe2. Use cub instead. "
"This error message may go away in the future.");
} else if (FLAGS_caffe2_cuda_memory_pool == "cub") {
// Sets up cub.
g_cuda_memory_pool_type = CudaMemoryPoolType::CUB;
SetUpCub();
} else if (FLAGS_caffe2_cuda_memory_pool == "thc") {
g_cuda_memory_pool_type = CudaMemoryPoolType::THC;
// Initialize caching allocator
at::globalContext().lazyInitCUDA();
} else {
CAFFE_THROW(
"Unrecognized cuda memory pool type: ", FLAGS_caffe2_cuda_memory_pool);
}
}
/**
* An allocator that does the CPU memory allocation with pinned memory.
*
* This is needed because if we want to do any asynchronous cuda memcpy,
* the underlying CPU memory also needs to be allocated into pinned memory
* space. As a result, whenever Caffe2 is built with GPU and there is
* GPU present during runtime, at global initialization time we will set
* the CPU memory allocator to allocate pinned memory.
*
* NB: This behavior is probably too aggressive. We should consider asking users
* to do on-demand memory pinning (like exposed in PyTorch APIs) instead.
*/
struct CAFFE2_CUDA_API PinnedCPUAllocator final : public at::Allocator {
PinnedCPUAllocator() {
baseAllocator_ = GetDefaultCPUAllocator();
}
~PinnedCPUAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
if (nbytes == 0) {
// replicate c10::alloc_cpu behavior - return nullptr
return {nullptr, nullptr, &Delete, at::Device(CPU)};
}
void* data;
at::DataPtr data_ptr;
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (IsNUMAEnabled()) {
at::DeleterFnPtr expected_deleter = baseAllocator_->raw_deleter();
data_ptr = baseAllocator_->allocate(nbytes);
data = data_ptr.get();
CAFFE_ENFORCE(data);
CUDA_ENFORCE(cudaHostRegister(data, nbytes, cudaHostRegisterDefault));
CAFFE_ENFORCE(
data_ptr.compare_exchange_deleter(expected_deleter, &Delete),
"Failed to swap deleter (already swapped?)");
} else {
CUDA_ENFORCE(cudaMallocHost(&data, nbytes));
profiledCPUMemoryReporter().New(data, nbytes);
data_ptr = {data, data, &Delete, at::Device(CPU)};
}
memset(data, 0, nbytes);
return data_ptr;
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* data) {
if (!data) {
return;
}
// Caffe2 uses a lazy way to figure out if one is actually going to use GPUs
// or not. If a CUDAContext::New() call is made, inside the CUDAContext
// function we will switch the cpu side allocator to a PinnedCPUAllocator.
// But, if one calls CPUContext::New() before any cuda allocations,
// PinnedCPUAllocator can still delete the corresponding memory.
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (IsNUMAEnabled()) {
CUDA_ENFORCE(cudaHostUnregister(data));
GetDefaultCPUAllocator()->raw_deleter()(data);
} else {
cudaError_t err = cudaFreeHost(data);
profiledCPUMemoryReporter().Delete(data);
if (err == cudaErrorInvalidValue) {
free(data);
// Calling cudaGetLastError will reset the cuda error.
cudaError_t _err = cudaGetLastError();
} else {
// For all other errors, still do a cuda check.
CUDA_ENFORCE(err);
}
}
}
at::Allocator* baseAllocator_;
};
static PinnedCPUAllocator g_pinned_cpu_alloc;
// An initialization function that sets the CPU side to use pinned cpu
// allocator.
void Caffe2UsePinnedCPUAllocator() {
#if C10_ASAN_ENABLED
// Note(jiayq): for more details, see
// https://github.com/google/sanitizers/issues/629
LOG(WARNING) << "There are known issues between address sanitizer and "
"cudaMallocHost. As a result, caffe2 will not enable pinned "
"memory allocation in asan mode. If you are expecting any "
"behavior that depends on asan, be advised that it is not "
"turned on.";
#else
if (!HasCudaGPU()) {
VLOG(1) << "No GPU present. I won't use pinned allocator then.";
return;
}
VLOG(1) << "Caffe2 gpu: setting CPUAllocator to PinnedCPUAllocator.";
// If CUDA is enabled, using CPU allocators other than PinnedCPUAllocator
// will cause memory corruptions. Therefore, we need to set the priority
// to highest to avoid being overwritten.
SetCPUAllocator(
&g_pinned_cpu_alloc,
std::numeric_limits<uint8_t>::max() /* priority */);
#endif
}
// Caffe2CudaInitializerHelper is a minimal struct whose sole purpose is to
// detect the first hint that this Caffe2 run is going to use GPU: either
// CUDAContext is initialized or CUDAContext::New is called. It then runs
// all the related cuda initialization functions.
namespace {
struct Caffe2CudaInitializerHelper {
Caffe2CudaInitializerHelper() {
// We cannot use bool because nvcc changes bool to __nv_bool which does
// not have a std::atomic instantiation.
static std::atomic<char> first_call(1);
if (first_call.fetch_and((char)0)) {
Caffe2InitializeCuda();
Caffe2SetCUDAMemoryPool();
Caffe2UsePinnedCPUAllocator();
}
}
};
} // namespace
/**
* A utility function to rectify the gpu id. If the context specifies the
* gpu id to be -1, it means that we will just use the current gpu id when
* the function is being called.
*/
static inline DeviceIndex RectifyGPUID(DeviceIndex gpu_id) {
return gpu_id == -1 ? CaffeCudaGetDevice() : gpu_id;
}
CUDAContext::CUDAContext(DeviceIndex gpu_id)
: gpu_id_(RectifyGPUID(gpu_id)), random_seed_(RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
}
CUDAContext::CUDAContext(const DeviceOption& option)
: gpu_id_(
option.has_device_id() ? RectifyGPUID(option.device_id())
: CaffeCudaGetDevice()),
random_seed_(
option.has_random_seed() ? option.random_seed()
: RandomNumberSeed()) {
static Caffe2CudaInitializerHelper g_cuda_initializer_;
DCHECK_EQ(option.device_type(), PROTO_CUDA);
}
CUDAContext::~CUDAContext() {
try {
if (curand_generator_) {
CURAND_CHECK(curandDestroyGenerator(curand_generator_));
}
// CUDAContext is used in 2 cases now:
// - long-lived instance inside OperatorBase in which case what happens in
// destructor doesn't really matter
// - short-lived on-the-fly instances that are utilized as CUDAGuard - in
// this case there's only one stream id (passed to SwitchToDevice) and
// it's preferable to synchronize in the destructor
FinishDeviceComputation();
} catch (const std::exception& e) {
LOG(ERROR) << "Encountered following in " << __FUNCTION__ << ": " << e.what();
}
}
// shared mutex to lock out alloc / free during NCCL launches
std::mutex& CUDAContext::mutex() {
static std::mutex m;
return m;
}
std::vector<long> CUDAContext::TotalMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_total_by_gpu_map;
}
std::vector<long> CUDAContext::MaxMemoryByGpu() {
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
CAFFE_ENFORCE(
FLAGS_caffe2_gpu_memory_tracking,
"Pass --caffe2_gpu_memory_tracking to enable memory stats");
return g_max_by_gpu_map;
}
namespace {
void TrackMemoryAlloc(size_t nbytes) {
int this_gpu = CaffeCudaGetDevice();
g_total_by_gpu_map[this_gpu] += nbytes;
g_max_by_gpu_map[this_gpu] =
std::max(g_max_by_gpu_map[this_gpu], g_total_by_gpu_map[this_gpu]);
g_total_mem += nbytes;
if (g_total_mem - g_last_rep >
FLAGS_caffe2_gpu_memory_report_interval_mb * 1024 * 1024) {
for (int gpu = 0; gpu < g_total_by_gpu_map.size(); gpu++) {
long t = g_total_by_gpu_map[gpu];
long max_t = g_max_by_gpu_map[gpu];
if (max_t > 0) {
if (max_t != t) {
VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB"
<< " (max: " << max_t / 1024 / 1024 << " MB)";
} else {
VLOG(1) << "GPU " << gpu << ": " << t / 1024 / 1024 << " MB";
}
}
}
VLOG(1) << "Total: " << g_total_mem / 1024 / 1024 << " MB";
g_last_rep = g_total_mem;
}
}
}
struct DefaultCUDAAllocator final : public at::Allocator {
DefaultCUDAAllocator() {}
~DefaultCUDAAllocator() override {}
at::DataPtr allocate(size_t nbytes) const override {
// Lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
// A one-time caffe2 cuda initializer.
static Caffe2CudaInitializerHelper g_cuda_initializer_;
void* ptr = nullptr;
if (FLAGS_caffe2_gpu_memory_tracking) {
TrackMemoryAlloc(nbytes);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE:
if (nbytes != 0) {
CUDA_ENFORCE(cudaMalloc(&ptr, nbytes));
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::CUB:
if (nbytes != 0) {
CUDA_ENFORCE(g_cub_allocator->DeviceAllocate(&ptr, nbytes));
}
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
VLOG(2) << "CUB allocating pointer " << ptr << " on device "
<< CaffeCudaGetDevice();
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
case CudaMemoryPoolType::THC:
{
// The reason we have this stream guard here is to preserve
// the historical behavior of the 'thc' allocator in Caffe2,
// which is to put all allocations on the same (default)
// stream. This behavior is morally wrong (since passing
// allocations between streams allows for the possibility
// of you handing out some memory that an old stream
// is still working on), but it doesn't seem to cause issues
// in Caffe2 today. Our hypothesis for why this is the case
// is that Caffe2 doesn't really do very many allocations
// on the fly; instead they allocate once and then reuse
// the allocations for the whole program. In this case,
// the hazard is avoided.
//
// We intend to remove this stream guard, but the benefit
// to putting all allocations on the same stream is it
// reduces per-stream fragmentation, and this helps
// some models that are currently running with the thc
// allocator fit in memory. We will need to find some
// way of resolving this problem.
cuda::CUDAStreamGuard g(
Stream(
Stream::DEFAULT,
Device(kCUDA, CaffeCudaGetDevice())
));
ptr = cuda::CUDACachingAllocator::raw_alloc(nbytes);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_size_map[ptr] = nbytes;
g_cuda_device_affiliation[ptr] = CaffeCudaGetDevice();
}
return {ptr, ptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
return {nullptr, nullptr, &Delete, at::Device(CUDA, CaffeCudaGetDevice())};
}
at::DeleterFnPtr raw_deleter() const override {
return &Delete;
}
private:
static void Delete(void* ptr) {
// lock the mutex
std::lock_guard<std::mutex> lock(CUDAContext::mutex());
if (FLAGS_caffe2_gpu_memory_tracking) {
auto sz_it = g_size_map.find(ptr);
DCHECK(sz_it != g_size_map.end());
auto aff_it = g_cuda_device_affiliation.find(ptr);
DCHECK(aff_it != g_cuda_device_affiliation.end());
g_total_mem -= sz_it->second;
g_total_by_gpu_map[aff_it->second] -= sz_it->second;
g_size_map.erase(sz_it);
}
switch (g_cuda_memory_pool_type) {
case CudaMemoryPoolType::NONE: {
// If memory pool is not set up, use simple cudaFree.
cudaError_t error = cudaFree(ptr);
// For some reason, in Python runtime we sometimes delete a data pointer
// after the cuda runtime exits - this is odd but is probably caused by
// a static workspace that pycaffe2 uses, and the destruction got
// entangled in some race condition. Anyway, since cuda runtime is
// exiting anyway, we will not need to worry about memory leak, so we
// basically ignore it. This is definitely not ideal but works for now.
if (error != cudaSuccess && error != cudaErrorCudartUnloading) {
LOG(FATAL) << "Error at: " << __FILE__ << ":" << __LINE__ << ": "
<< cudaGetErrorString(error);
}
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
case CudaMemoryPoolType::CUB: {
auto it = g_cuda_device_affiliation.find(ptr);
DCHECK(it != g_cuda_device_affiliation.end());
VLOG(2) << "CUB freeing pointer " << ptr << " on device " << it->second;
CUDA_ENFORCE(g_cub_allocator->DeviceFree(it->second, ptr));
g_cuda_device_affiliation.erase(it);
break;
}
case CudaMemoryPoolType::THC: {
cuda::CUDACachingAllocator::raw_delete(ptr);
if (FLAGS_caffe2_gpu_memory_tracking) {
g_cuda_device_affiliation.erase(g_cuda_device_affiliation.find(ptr));
}
break;
}
}
}
};
static DefaultCUDAAllocator g_cuda_alloc;
REGISTER_ALLOCATOR(CUDA, &g_cuda_alloc);
} // namespace caffe2
namespace at {
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CUDA,
DeviceType::CPU,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
REGISTER_COPY_BYTES_FUNCTION(
DeviceType::CPU,
DeviceType::CUDA,
caffe2::CUDAContext::CopyBytesSync,
caffe2::CUDAContext::CopyBytesAsync);
} // namespace at
|
0b1f9ef9ccffe8817c3eb27533864a9672dddbce.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <string.h>
#include <time.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include "../include/cudaHash.h"
#include "scanImpl.cu"
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
/*
* Transform integer to string using one single gpu thread.
*/
__device__ static char * gpuItoa(int value, char* result, int base){
if (base < 2 || base > 36) {
*result = '\0';
return result;
}
char* ptr = result, *ptr1 = result, tmp_char;
int tmp_value;
do {
tmp_value = value;
value /= base;
*ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmnopqrstuvwxyz" [35 + (tmp_value - value * base)];
} while ( value );
if (tmp_value < 0)
*ptr++ = '-';
*ptr-- = '\0';
while(ptr1 < ptr) {
tmp_char = *ptr;
*ptr--= *ptr1;
*ptr1++ = tmp_char;
}
return result;
}
/*
* string copy using one gpu thread.
*/
__device__ static char * gpuStrcpy(char * dst, const char * src){
char * orig = dst;
while(*src)
*dst++ = *src++;
*dst = '\0';
return orig;
}
__device__ static char* gpuStrncat(char *dest, const char *src, size_t n)
{
int dest_len = 0;
int i;
char * tmp = dest;
while(*tmp != '\0'){
tmp++;
dest_len ++;
}
for (i = 0 ; i < n && src[i] != '\0' ; i++)
dest[dest_len + i] = src[i];
dest[dest_len + i] = '\0';
return dest;
}
__device__ static char * gpuStrcat(char * dest, const char * src){
char *tmp =dest;
int dest_len = 0;
int i;
while (*tmp!= '\0'){
tmp++ ;
dest_len ++;
}
for(i=0; src[i] !='\0'; i++){
dest[dest_len + i] = src[i];
}
dest[dest_len + i] = '\0';
return dest;
}
/*
* Combine the group by columns to build the group by keys.
*/
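/*
* Each thread serializes its tuple's group-by column values into a small char buffer
* (a constant group-by column contributes "1", INT columns go through gpuItoa, STRING
* columns are appended raw), hashes the buffer with StringHash into one of HSIZE buckets,
* stores the bucket id in key[i] and marks the bucket as occupied in num[].
*/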
__global__ static void build_groupby_key(char ** content, int gbColNum, int * gbIndex, int * gbType, int * gbSize, long tupleNum, int * key, int *num){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
for(long i = offset; i< tupleNum; i+= stride){
char buf[128] = {0};
for (int j=0; j< gbColNum; j++){
char tbuf[32]={0};
int index = gbIndex[j];
if (index == -1){
gpuItoa(1,tbuf,10);
gpuStrncat(buf,tbuf,1);
}else if (gbType[j] == STRING){
gpuStrncat(buf, content[index] + i*gbSize[j], gbSize[j]);
}else if (gbType[j] == INT){
int key = ((int *)(content[index]))[i];
gpuItoa(key,tbuf,10);
gpuStrcat(buf,tbuf);
}
}
int hkey = StringHash(buf) % HSIZE;
key[i]= hkey;
num[hkey] = 1;
}
}
/*
* This is for testing only.
*/
__global__ static void build_groupby_key_soa(char ** content, int gbColNum, int * gbIndex, int * gbType, int * gbSize, long tupleNum, int * key, int *num){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
for(long i = offset; i< tupleNum; i+= stride){
char buf[128] = {0};
for (int j=0; j< gbColNum; j++){
char tbuf[32]={0};
int index = gbIndex[j];
if (index == -1){
gpuItoa(1,tbuf,10);
gpuStrncat(buf,tbuf,1);
}else if (gbType[j] == STRING){
for(int k=0;k<gbSize[j];k++){
long pos = k*tupleNum + i;
buf[k] = content[index][pos];
}
gpuStrncat(buf,tbuf,gbSize[j]);
}else if (gbType[j] == INT){
int key = ((int *)(content[index]))[i];
gpuItoa(key,tbuf,10);
gpuStrcat(buf,tbuf);
}
}
int hkey = StringHash(buf) % HSIZE;
key[i]= hkey;
num[hkey] = 1;
}
}
/*
* Count the number of groups
*/
__global__ void count_group_num(int *num, int tupleNum, int *totalCount){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = 0;
for(int i=offset; i<tupleNum; i+= stride){
if(num[i] == 1){
localCount ++;
}
}
atomicAdd(totalCount,localCount);
}
/*
* Calculate the groupBy expression.
*/
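/*
* calMathExp evaluates an expression tree of at most two operands: with op == NOOP it
* returns either the constant exp[0].opValue or the value of column exp[0].opValue at
* row pos; otherwise it evaluates exp[0] and exp[1] recursively and combines them with
* PLUS, MINUS, MULTIPLY or DIVIDE.
*/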
__device__ static float calMathExp(char **content, struct mathExp * exp, int pos, int op){
float res ;
if(op == NOOP){
if (exp[0].opType == CONS)
res = exp[0].opValue;
else{
int index = exp[0].opValue;
res = ((int *)(content[index]))[pos];
}
}else if(op == PLUS ){
res = calMathExp(content, &exp[0],pos, NOOP) + calMathExp(content, &exp[1], pos, NOOP);
}else if (op == MINUS){
res = calMathExp(content, &exp[0],pos, NOOP) - calMathExp(content, &exp[1], pos, NOOP);
}else if (op == MULTIPLY){
res = calMathExp(content, &exp[0],pos, NOOP) * calMathExp(content, &exp[1], pos, NOOP);
}else if (op == DIVIDE){
res = calMathExp(content, &exp[0],pos, NOOP) / calMathExp(content, &exp[1], pos, NOOP);
}
return res;
}
/*
* group by constant. Currently only the SUM function is supported.
*/
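/*
* Each thread accumulates its partial sums for all output columns in the local buf[]
* array (up to 32 columns) and folds them into ((float *)result[i])[0] with a single
* atomicAdd per column at the end, keeping atomic contention to one update per thread
* per column.
*/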
__global__ void agg_cal_cons(char ** content, int colNum, int * funcArray, int *op, struct mathExp* exp, int * mathOffset, int * gbType, int * gbSize, long tupleNum, int * key, int *psum, char ** result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
float buf[32];
for(int i=0;i<32;i++)
buf[i] = 0;
for(int i=index;i<tupleNum;i+=stride){
for(int j=0;j<colNum;j++){
int func = funcArray[j];
int offset = mathOffset[j];
if (func == SUM){
float tmpRes = calMathExp(content,&exp[offset] , i, op[j]);
buf[j] += tmpRes;
}
}
}
for(int i=0;i<colNum;i++)
atomicAdd(&((float *)result[i])[0], buf[i]);
}
/*
* group by
*/
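/*
* For every tuple, agg_cal looks up the output slot of its group via psum[key[i]]; NOOP
* columns copy the group-by value (or constant) into that slot, while SUM columns
* evaluate their math expression with calMathExp and accumulate the result with
* atomicAdd, since several tuples of the same group may be processed concurrently.
*/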
__global__ void agg_cal(char ** content, int colNum, int * funcArray, int * op, struct mathExp* exp, int * mathOffset, int * gbType, int * gbSize, long tupleNum, int * key, int *psum, char ** result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
for(int i=index;i<tupleNum;i+=stride){
int hKey = key[i];
int offset = psum[hKey];
for(int j=0;j<colNum;j++){
int func = funcArray[j];
int mo = mathOffset[j];
if(func ==NOOP){
int type = exp[mo].opType;
if(type == CONS){
int value = exp[mo].opValue;
((int *)result[j])[offset] = value;
}else{
int index = exp[mo].opValue;
int attrSize = gbSize[j];
if(attrSize == sizeof(int))
((int *)result[j])[offset] = ((int*)content[index])[i];
else
memcpy(result[j] + offset*attrSize, content[index] + i * attrSize, attrSize);
}
}else if (func == SUM){
float tmpRes = calMathExp(content, &exp[mo], i, op[j]);
atomicAdd(& ((float *)result[j])[offset], tmpRes);
}
}
}
}
/*
* groupBy: group by the data and calculate.
*
* Prerequisite:
* input data are not compressed
*
* Input:
* gb: the groupby node which contains the input data and groupby information
* pp: records the statistics such as kernel execution time
*
* Return:
* a new table node
*/
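/*
* High-level flow of the implementation below: (1) copy the needed input columns to the
* GPU, (2) unless grouping by a constant, hash the group-by columns into HSIZE buckets
* with build_groupby_key and count the distinct buckets with count_group_num, (3) turn
* the bucket occupancy flags into output offsets with scanImpl (a prefix sum), and
* (4) run agg_cal / agg_cal_cons to materialize the aggregated result columns on the GPU.
*/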
struct tableNode * groupBy(struct groupByNode * gb, struct statistic * pp){
struct timespec start,end;
clock_gettime(CLOCK_REALTIME,&start);
int *gpuGbIndex = NULL, gpuTupleNum, gpuGbColNum;
int *gpuGbType = NULL, *gpuGbSize = NULL;
int *gpuGbKey = NULL;
char ** gpuContent = NULL, **column = NULL;
/*
* @gbCount: the number of groups
* @gbConstant: whether the group-by key is a constant
*/
int gbCount;
int gbConstant = 0;
struct tableNode *res = (struct tableNode *) malloc(sizeof(struct tableNode));
CHECK_POINTER(res);
res->tupleSize = gb->tupleSize;
res->totalAttr = gb->outputAttrNum;
res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrType);
res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrSize);
res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrTotalSize);
res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataPos);
res->dataFormat = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataFormat);
res->content = (char **) malloc(sizeof(char **) * res->totalAttr);
CHECK_POINTER(res->content);
for(int i=0;i<res->totalAttr;i++){
res->attrType[i] = gb->attrType[i];
res->attrSize[i] = gb->attrSize[i];
res->dataFormat[i] = UNCOMPRESSED;
}
gpuTupleNum = gb->table->tupleNum;
gpuGbColNum = gb->groupByColNum;
if(gpuGbColNum == 1 && gb->groupByIndex[0] == -1){
gbConstant = 1;
}
dim3 grid(1024);
dim3 block(128);
int blockNum = gb->table->tupleNum / block.x + 1;
if(blockNum < 1024)
grid = blockNum;
int *gpu_hashNum = NULL, *gpu_psum = NULL, *gpuGbCount = NULL;
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, gb->table->totalAttr * sizeof(char *), HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuContent, gb->table->totalAttr * sizeof(char *)));
#endif
column = (char **) malloc(sizeof(char *) * gb->table->totalAttr);
CHECK_POINTER(column);
for(int i=0;i<gb->table->totalAttr;i++){
int attrSize = gb->table->attrSize[i];
if(gb->table->dataPos[i]==MEM){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)& column[i], attrSize * gb->table->tupleNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(column[i], gb->table->content[i], attrSize *gb->table->tupleNum, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &column[i], sizeof(char *), hipMemcpyHostToDevice));
}else{
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuContent[i], &gb->table->content[i], sizeof(char *), hipMemcpyHostToDevice));
}
}
if(gbConstant != 1){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbType, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuGbType,gb->groupByType, sizeof(int) * gb->groupByColNum, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbSize, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuGbSize,gb->groupBySize, sizeof(int) * gb->groupByColNum, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbKey, gb->table->tupleNum * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbIndex, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuGbIndex, gb->groupByIndex,sizeof(int) * gb->groupByColNum, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpu_hashNum,sizeof(int)*HSIZE));
CUDA_SAFE_CALL_NO_SYNC(hipMemset(gpu_hashNum,0,sizeof(int)*HSIZE));
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(6, HINT_WRITE));
GMM_CALL(cudaReference(7, HINT_WRITE));
hipLaunchKernelGGL(( build_groupby_key), dim3(grid),dim3(block), 0, 0, gpuContent,gpuGbColNum, gpuGbIndex, gpuGbType,gpuGbSize,gpuTupleNum, gpuGbKey, gpu_hashNum);
CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbType));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbSize));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbIndex));
gbCount = 1;
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbCount,sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(hipMemset(gpuGbCount, 0, sizeof(int)));
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(2, HINT_DEFAULT));
hipLaunchKernelGGL(( count_group_num), dim3(grid),dim3(block), 0, 0, gpu_hashNum, HSIZE, gpuGbCount);
CUDA_SAFE_CALL_NO_SYNC(hipDeviceSynchronize());
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gbCount, gpuGbCount, sizeof(int), hipMemcpyDeviceToHost));
CUDA_SAFE_CALL(hipMalloc((void**)&gpu_psum,HSIZE*sizeof(int)));
scanImpl(gpu_hashNum,HSIZE,gpu_psum,pp);
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbCount));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_hashNum));
}
if(gbConstant == 1)
res->tupleNum = 1;
else
res->tupleNum = gbCount;
printf("(INFO) Number of groupBy results: %d\n",res->tupleNum);
char ** gpuResult = NULL;
char ** result = NULL;
result = (char **)malloc(sizeof(char*)*res->totalAttr);
CHECK_POINTER(result);
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void**)&gpuResult, sizeof(char *)* res->totalAttr, HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuResult, sizeof(char *)* res->totalAttr));
#endif
for(int i=0; i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&result[i], res->tupleNum * res->attrSize[i]));
CUDA_SAFE_CALL_NO_SYNC(hipMemset(result[i], 0, res->tupleNum * res->attrSize[i]));
res->content[i] = result[i];
res->dataPos[i] = GPU;
res->attrTotalSize[i] = res->tupleNum * res->attrSize[i];
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&gpuResult[i], &result[i], sizeof(char *), hipMemcpyHostToDevice));
}
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbType, sizeof(int)*res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuGbType, res->attrType, sizeof(int)*res->totalAttr, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuGbSize, sizeof(int)*res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuGbSize, res->attrSize, sizeof(int)*res->totalAttr, hipMemcpyHostToDevice));
struct mathExp * gpuMathExp = NULL;
int * cpuFunc = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuFunc = NULL;
int * op = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuOp = NULL;
int * mathExpOffset = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuMathOffset = NULL;
int mathExpNum = 0;
for(int i=0;i<res->totalAttr;i++){
mathExpOffset[i] = mathExpNum;
cpuFunc[i] = gb->gbExp[i].func;
op[i] = gb->gbExp[i].exp.op;
if(gb->gbExp[i].exp.opNum == 2)
mathExpNum += 2;
else
mathExpNum += 1;
}
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void **)&gpuMathExp, sizeof(struct mathExp) * mathExpNum));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuMathOffset, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuMathOffset,mathExpOffset, sizeof(int) * res->totalAttr, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuFunc, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuFunc, cpuFunc, sizeof(int) * res->totalAttr, hipMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(hipMalloc((void**)&gpuOp, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(gpuOp, op, sizeof(int) * res->totalAttr, hipMemcpyHostToDevice));
for(int i=0;i<res->totalAttr;i++){
int offset = mathExpOffset[i];
if(gb->gbExp[i].exp.opNum == 2){
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&(gpuMathExp[offset]),(struct mathExp*)gb->gbExp[i].exp.exp,2*sizeof(struct mathExp), hipMemcpyHostToDevice));
}else{
CUDA_SAFE_CALL_NO_SYNC(hipMemcpy(&(gpuMathExp[offset]),&(gb->gbExp[i].exp),sizeof(struct mathExp), hipMemcpyHostToDevice));
}
}
free(mathExpOffset);
free(cpuFunc);
free(op);
gpuGbColNum = res->totalAttr;
if(gbConstant !=1){
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_READ));
GMM_CALL(cudaReference(7, HINT_READ));
GMM_CALL(cudaReference(9, HINT_READ));
GMM_CALL(cudaReference(10, HINT_READ));
GMM_CALL(cudaReference(11, HINT_READ|HINT_PTARRAY|HINT_PTADEFAULT));
hipLaunchKernelGGL(( agg_cal), dim3(grid),dim3(block), 0, 0, gpuContent, gpuGbColNum, gpuFunc, gpuOp, gpuMathExp,gpuMathOffset, gpuGbType, gpuGbSize, gpuTupleNum, gpuGbKey, gpu_psum, gpuResult);
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbKey));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpu_psum));
}else {
// kaibo: gpuGbKey and gpu_psum are not allocated when gbConstant == 1, so we should not reference them in this case
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_READ));
GMM_CALL(cudaReference(7, HINT_READ));
//GMM_CALL(cudaReference(9, HINT_READ));
//GMM_CALL(cudaReference(10, HINT_READ));
GMM_CALL(cudaReference(11, HINT_READ|HINT_PTARRAY|HINT_PTADEFAULT));
hipLaunchKernelGGL(( agg_cal_cons), dim3(grid),dim3(block), 0, 0, gpuContent, gpuGbColNum, gpuFunc, gpuOp, gpuMathExp,gpuMathOffset, gpuGbType, gpuGbSize, gpuTupleNum, NULL, NULL, gpuResult);
}
for(int i=0; i<gb->table->totalAttr;i++){
if(gb->table->dataPos[i]==MEM)
CUDA_SAFE_CALL_NO_SYNC(hipFree(column[i]));
}
free(column);
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbType));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuGbSize));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuMathExp));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuMathOffset));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuFunc));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuOp));
CUDA_SAFE_CALL_NO_SYNC(hipFree(gpuResult));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("GroupBy Time: %lf\n", timeE/(1000*1000));
return res;
}
| 0b1f9ef9ccffe8817c3eb27533864a9672dddbce.cu | /*
Copyright (c) 2012-2013 The Ohio State University.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <stdio.h>
#include <cuda.h>
#include <string.h>
#include <time.h>
#include "../include/common.h"
#include "../include/gpuCudaLib.h"
#include "../include/cudaHash.h"
#include "scanImpl.cu"
#define CHECK_POINTER(p) do { \
if(p == NULL){ \
perror("Failed to allocate host memory"); \
exit(-1); \
}} while(0)
/*
* Transform integer to string using one single gpu thread.
*/
__device__ static char * gpuItoa(int value, char* result, int base){
if (base < 2 || base > 36) {
*result = '\0';
return result;
}
char* ptr = result, *ptr1 = result, tmp_char;
int tmp_value;
do {
tmp_value = value;
value /= base;
*ptr++ = "zyxwvutsrqponmlkjihgfedcba9876543210123456789abcdefghijklmnopqrstuvwxyz" [35 + (tmp_value - value * base)];
} while ( value );
if (tmp_value < 0)
*ptr++ = '-';
*ptr-- = '\0';
while(ptr1 < ptr) {
tmp_char = *ptr;
*ptr--= *ptr1;
*ptr1++ = tmp_char;
}
return result;
}
/*
* string copy using one gpu thread.
*/
__device__ static char * gpuStrcpy(char * dst, const char * src){
char * orig = dst;
while(*src)
*dst++ = *src++;
*dst = '\0';
return orig;
}
__device__ static char* gpuStrncat(char *dest, const char *src, size_t n)
{
int dest_len = 0;
int i;
char * tmp = dest;
while(*tmp != '\0'){
tmp++;
dest_len ++;
}
for (i = 0 ; i < n && src[i] != '\0' ; i++)
dest[dest_len + i] = src[i];
dest[dest_len + i] = '\0';
return dest;
}
__device__ static char * gpuStrcat(char * dest, const char * src){
char *tmp =dest;
int dest_len = 0;
int i;
while (*tmp!= '\0'){
tmp++ ;
dest_len ++;
}
for(i=0; src[i] !='\0'; i++){
dest[dest_len + i] = src[i];
}
dest[dest_len + i] = '\0';
return dest;
}
/*
* Combine the group by columns to build the group by keys.
*/
__global__ static void build_groupby_key(char ** content, int gbColNum, int * gbIndex, int * gbType, int * gbSize, long tupleNum, int * key, int *num){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
for(long i = offset; i< tupleNum; i+= stride){
char buf[128] = {0};
for (int j=0; j< gbColNum; j++){
char tbuf[32]={0};
int index = gbIndex[j];
if (index == -1){
gpuItoa(1,tbuf,10);
gpuStrncat(buf,tbuf,1);
}else if (gbType[j] == STRING){
gpuStrncat(buf, content[index] + i*gbSize[j], gbSize[j]);
}else if (gbType[j] == INT){
int key = ((int *)(content[index]))[i];
gpuItoa(key,tbuf,10);
gpuStrcat(buf,tbuf);
}
}
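// hash the concatenated key string into one of HSIZE buckets and mark that bucket as used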
int hkey = StringHash(buf) % HSIZE;
key[i]= hkey;
num[hkey] = 1;
}
}
/*
* This is for testing only.
*/
__global__ static void build_groupby_key_soa(char ** content, int gbColNum, int * gbIndex, int * gbType, int * gbSize, long tupleNum, int * key, int *num){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
for(long i = offset; i< tupleNum; i+= stride){
char buf[128] = {0};
for (int j=0; j< gbColNum; j++){
char tbuf[32]={0};
int index = gbIndex[j];
if (index == -1){
gpuItoa(1,tbuf,10);
gpuStrncat(buf,tbuf,1);
}else if (gbType[j] == STRING){
for(int k=0;k<gbSize[j];k++){
long pos = k*tupleNum + i;
buf[k] = content[index][pos];
}
gpuStrncat(buf,tbuf,gbSize[j]);
}else if (gbType[j] == INT){
int key = ((int *)(content[index]))[i];
gpuItoa(key,tbuf,10);
gpuStrcat(buf,tbuf);
}
}
int hkey = StringHash(buf) % HSIZE;
key[i]= hkey;
num[hkey] = 1;
}
}
/*
* Count the number of groups
*/
__global__ void count_group_num(int *num, int tupleNum, int *totalCount){
int stride = blockDim.x * gridDim.x;
int offset = blockIdx.x * blockDim.x + threadIdx.x;
int localCount = 0;
for(int i=offset; i<tupleNum; i+= stride){
if(num[i] == 1){
localCount ++;
}
}
atomicAdd(totalCount,localCount);
}
/*
* Calculate the groupBy expression.
*/
__device__ static float calMathExp(char **content, struct mathExp * exp, int pos, int op){
float res ;
if(op == NOOP){
if (exp[0].opType == CONS)
res = exp[0].opValue;
else{
int index = exp[0].opValue;
res = ((int *)(content[index]))[pos];
}
}else if(op == PLUS ){
res = calMathExp(content, &exp[0],pos, NOOP) + calMathExp(content, &exp[1], pos, NOOP);
}else if (op == MINUS){
res = calMathExp(content, &exp[0],pos, NOOP) - calMathExp(content, &exp[1], pos, NOOP);
}else if (op == MULTIPLY){
res = calMathExp(content, &exp[0],pos, NOOP) * calMathExp(content, &exp[1], pos, NOOP);
}else if (op == DIVIDE){
res = calMathExp(content, &exp[0],pos, NOOP) / calMathExp(content, &exp[1], pos, NOOP);
}
return res;
}
/*
* group by constant. Currently only support SUM function.
*/
__global__ void agg_cal_cons(char ** content, int colNum, int * funcArray, int *op, struct mathExp* exp, int * mathOffset, int * gbType, int * gbSize, long tupleNum, int * key, int *psum, char ** result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
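// per-thread partial sums, one slot per output column (assumes colNum <= 32)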
float buf[32];
for(int i=0;i<32;i++)
buf[i] = 0;
for(int i=index;i<tupleNum;i+=stride){
for(int j=0;j<colNum;j++){
int func = funcArray[j];
int offset = mathOffset[j];
if (func == SUM){
float tmpRes = calMathExp(content,&exp[offset] , i, op[j]);
buf[j] += tmpRes;
}
}
}
for(int i=0;i<colNum;i++)
atomicAdd(&((float *)result[i])[0], buf[i]);
}
/*
* group by
*/
__global__ void agg_cal(char ** content, int colNum, int * funcArray, int * op, struct mathExp* exp, int * mathOffset, int * gbType, int * gbSize, long tupleNum, int * key, int *psum, char ** result){
int stride = blockDim.x * gridDim.x;
int index = blockIdx.x * blockDim.x + threadIdx.x;
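// each tuple writes its aggregates at psum[hKey], the output offset of its hash bucket; SUM columns accumulate with atomicAdd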
for(int i=index;i<tupleNum;i+=stride){
int hKey = key[i];
int offset = psum[hKey];
for(int j=0;j<colNum;j++){
int func = funcArray[j];
int mo = mathOffset[j];
if(func ==NOOP){
int type = exp[mo].opType;
if(type == CONS){
int value = exp[mo].opValue;
((int *)result[j])[offset] = value;
}else{
int index = exp[mo].opValue;
int attrSize = gbSize[j];
if(attrSize == sizeof(int))
((int *)result[j])[offset] = ((int*)content[index])[i];
else
memcpy(result[j] + offset*attrSize, content[index] + i * attrSize, attrSize);
}
}else if (func == SUM){
float tmpRes = calMathExp(content, &exp[mo], i, op[j]);
atomicAdd(& ((float *)result[j])[offset], tmpRes);
}
}
}
}
/*
* groupBy: group the data and compute the aggregate expressions.
*
* Prerequisite:
* input data are not compressed
*
* Input:
* gb: the groupby node which contains the input data and groupby information
* pp: records the statistics such as kernel execution time
*
* Return:
* a new table node
*/
struct tableNode * groupBy(struct groupByNode * gb, struct statistic * pp){
struct timespec start,end;
clock_gettime(CLOCK_REALTIME,&start);
int *gpuGbIndex = NULL, gpuTupleNum, gpuGbColNum;
int *gpuGbType = NULL, *gpuGbSize = NULL;
int *gpuGbKey = NULL;
char ** gpuContent = NULL, **column = NULL;
/*
* @gbCount: the number of groups
* @gbConstant: whether the query groups by a constant (groupByIndex is -1), i.e. there is a single output group
*/
int gbCount;
int gbConstant = 0;
struct tableNode *res = (struct tableNode *) malloc(sizeof(struct tableNode));
CHECK_POINTER(res);
res->tupleSize = gb->tupleSize;
res->totalAttr = gb->outputAttrNum;
res->attrType = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrType);
res->attrSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrSize);
res->attrTotalSize = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->attrTotalSize);
res->dataPos = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataPos);
res->dataFormat = (int *) malloc(sizeof(int) * res->totalAttr);
CHECK_POINTER(res->dataFormat);
res->content = (char **) malloc(sizeof(char **) * res->totalAttr);
CHECK_POINTER(res->content);
for(int i=0;i<res->totalAttr;i++){
res->attrType[i] = gb->attrType[i];
res->attrSize[i] = gb->attrSize[i];
res->dataFormat[i] = UNCOMPRESSED;
}
gpuTupleNum = gb->table->tupleNum;
gpuGbColNum = gb->groupByColNum;
if(gpuGbColNum == 1 && gb->groupByIndex[0] == -1){
gbConstant = 1;
}
dim3 grid(1024);
dim3 block(128);
int blockNum = gb->table->tupleNum / block.x + 1;
if(blockNum < 1024)
grid = blockNum;
int *gpu_hashNum = NULL, *gpu_psum = NULL, *gpuGbCount = NULL;
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void **)&gpuContent, gb->table->totalAttr * sizeof(char *), HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuContent, gb->table->totalAttr * sizeof(char *)));
#endif
column = (char **) malloc(sizeof(char *) * gb->table->totalAttr);
CHECK_POINTER(column);
for(int i=0;i<gb->table->totalAttr;i++){
int attrSize = gb->table->attrSize[i];
if(gb->table->dataPos[i]==MEM){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)& column[i], attrSize * gb->table->tupleNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(column[i], gb->table->content[i], attrSize *gb->table->tupleNum, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &column[i], sizeof(char *), cudaMemcpyHostToDevice));
}else{
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuContent[i], &gb->table->content[i], sizeof(char *), cudaMemcpyHostToDevice));
}
}
if(gbConstant != 1){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbType, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuGbType,gb->groupByType, sizeof(int) * gb->groupByColNum, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbSize, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuGbSize,gb->groupBySize, sizeof(int) * gb->groupByColNum, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbKey, gb->table->tupleNum * sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbIndex, sizeof(int) * gb->groupByColNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuGbIndex, gb->groupByIndex,sizeof(int) * gb->groupByColNum, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpu_hashNum,sizeof(int)*HSIZE));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpu_hashNum,0,sizeof(int)*HSIZE));
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(6, HINT_WRITE));
GMM_CALL(cudaReference(7, HINT_WRITE));
build_groupby_key<<<grid,block>>>(gpuContent,gpuGbColNum, gpuGbIndex, gpuGbType,gpuGbSize,gpuTupleNum, gpuGbKey, gpu_hashNum);
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbType));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbSize));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbIndex));
gbCount = 1;
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbCount,sizeof(int)));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(gpuGbCount, 0, sizeof(int)));
GMM_CALL(cudaReference(0, HINT_READ));
GMM_CALL(cudaReference(2, HINT_DEFAULT));
count_group_num<<<grid,block>>>(gpu_hashNum, HSIZE, gpuGbCount);
CUDA_SAFE_CALL_NO_SYNC(cudaDeviceSynchronize());
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gbCount, gpuGbCount, sizeof(int), cudaMemcpyDeviceToHost));
CUDA_SAFE_CALL(cudaMalloc((void**)&gpu_psum,HSIZE*sizeof(int)));
scanImpl(gpu_hashNum,HSIZE,gpu_psum,pp);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbCount));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_hashNum));
}
if(gbConstant == 1)
res->tupleNum = 1;
else
res->tupleNum = gbCount;
printf("(INFO) Number of groupBy results: %d\n",res->tupleNum);
char ** gpuResult = NULL;
char ** result = NULL;
result = (char **)malloc(sizeof(char*)*res->totalAttr);
CHECK_POINTER(result);
#ifdef HAS_GMM
CUDA_SAFE_CALL_NO_SYNC(cudaMallocEx((void**)&gpuResult, sizeof(char *)* res->totalAttr, HINT_PTARRAY));
#else
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuResult, sizeof(char *)* res->totalAttr));
#endif
for(int i=0; i<res->totalAttr;i++){
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&result[i], res->tupleNum * res->attrSize[i]));
CUDA_SAFE_CALL_NO_SYNC(cudaMemset(result[i], 0, res->tupleNum * res->attrSize[i]));
res->content[i] = result[i];
res->dataPos[i] = GPU;
res->attrTotalSize[i] = res->tupleNum * res->attrSize[i];
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&gpuResult[i], &result[i], sizeof(char *), cudaMemcpyHostToDevice));
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbType, sizeof(int)*res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuGbType, res->attrType, sizeof(int)*res->totalAttr, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuGbSize, sizeof(int)*res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuGbSize, res->attrSize, sizeof(int)*res->totalAttr, cudaMemcpyHostToDevice));
struct mathExp * gpuMathExp = NULL;
int * cpuFunc = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuFunc = NULL;
int * op = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuOp = NULL;
int * mathExpOffset = (int *)malloc(sizeof(int) * res->totalAttr);
int * gpuMathOffset = NULL;
int mathExpNum = 0;
for(int i=0;i<res->totalAttr;i++){
mathExpOffset[i] = mathExpNum;
cpuFunc[i] = gb->gbExp[i].func;
op[i] = gb->gbExp[i].exp.op;
if(gb->gbExp[i].exp.opNum == 2)
mathExpNum += 2;
else
mathExpNum += 1;
}
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void **)&gpuMathExp, sizeof(struct mathExp) * mathExpNum));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuMathOffset, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuMathOffset,mathExpOffset, sizeof(int) * res->totalAttr, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuFunc, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuFunc, cpuFunc, sizeof(int) * res->totalAttr, cudaMemcpyHostToDevice));
CUDA_SAFE_CALL_NO_SYNC(cudaMalloc((void**)&gpuOp, sizeof(int) * res->totalAttr));
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(gpuOp, op, sizeof(int) * res->totalAttr, cudaMemcpyHostToDevice));
for(int i=0;i<res->totalAttr;i++){
int offset = mathExpOffset[i];
if(gb->gbExp[i].exp.opNum == 2){
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&(gpuMathExp[offset]),(struct mathExp*)gb->gbExp[i].exp.exp,2*sizeof(struct mathExp), cudaMemcpyHostToDevice));
}else{
CUDA_SAFE_CALL_NO_SYNC(cudaMemcpy(&(gpuMathExp[offset]),&(gb->gbExp[i].exp),sizeof(struct mathExp), cudaMemcpyHostToDevice));
}
}
free(mathExpOffset);
free(cpuFunc);
free(op);
gpuGbColNum = res->totalAttr;
if(gbConstant !=1){
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_READ));
GMM_CALL(cudaReference(7, HINT_READ));
GMM_CALL(cudaReference(9, HINT_READ));
GMM_CALL(cudaReference(10, HINT_READ));
GMM_CALL(cudaReference(11, HINT_READ|HINT_PTARRAY|HINT_PTADEFAULT));
agg_cal<<<grid,block>>>(gpuContent, gpuGbColNum, gpuFunc, gpuOp, gpuMathExp,gpuMathOffset, gpuGbType, gpuGbSize, gpuTupleNum, gpuGbKey, gpu_psum, gpuResult);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbKey));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpu_psum));
}else {
// kaibo: gpuGbKey and gpu_psum are not allocated when gbConstant == 1, so we should not reference them in this case
GMM_CALL(cudaReference(0, HINT_READ|HINT_PTARRAY|HINT_PTAREAD));
GMM_CALL(cudaReference(2, HINT_READ));
GMM_CALL(cudaReference(3, HINT_READ));
GMM_CALL(cudaReference(4, HINT_READ));
GMM_CALL(cudaReference(5, HINT_READ));
GMM_CALL(cudaReference(6, HINT_READ));
GMM_CALL(cudaReference(7, HINT_READ));
//GMM_CALL(cudaReference(9, HINT_READ));
//GMM_CALL(cudaReference(10, HINT_READ));
GMM_CALL(cudaReference(11, HINT_READ|HINT_PTARRAY|HINT_PTADEFAULT));
agg_cal_cons<<<grid,block>>>(gpuContent, gpuGbColNum, gpuFunc, gpuOp, gpuMathExp,gpuMathOffset, gpuGbType, gpuGbSize, gpuTupleNum, NULL, NULL, gpuResult);
}
for(int i=0; i<gb->table->totalAttr;i++){
if(gb->table->dataPos[i]==MEM)
CUDA_SAFE_CALL_NO_SYNC(cudaFree(column[i]));
}
free(column);
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuContent));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbType));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuGbSize));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuMathExp));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuMathOffset));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuFunc));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuOp));
CUDA_SAFE_CALL_NO_SYNC(cudaFree(gpuResult));
clock_gettime(CLOCK_REALTIME,&end);
double timeE = (end.tv_sec - start.tv_sec)* BILLION + end.tv_nsec - start.tv_nsec;
printf("GroupBy Time: %lf\n", timeE/(1000*1000));
return res;
}
|
fa8d238b3f5dcdbaab5e8282f6e161c0889a5e01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THH/generic/THHTensorMathMagma.hip"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, hipMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(hipMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, hipMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
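// workspace query: lwork = -1 asks MAGMA for the optimal workspace size, returned in wkopt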
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, bool eigenvectors)
{
#ifdef USE_MAGMA
char jobvrs = eigenvectors ? 'V' : 'N';
THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");
magma_vec_t jobvr = jobvrs == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size(0);
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *vr_data = NULL;
int64_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<scalar_t>(n * n);
ldvr = n;
}
scalar_t *work_data = nullptr;
if (n > 0) {
int info;
scalar_t wkopt;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
work_data = th_magma_malloc_pinned<scalar_t>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
}
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
if (n > 0) {
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), hipMemcpyHostToDevice));
THCudaCheck(hipMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), hipMemcpyHostToDevice));
}
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, bool upper)
{
char uplo = upper ? 'U' : 'L';
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
hipStream_t stream = c10::hip::getCurrentHIPStreamMasqueradingAsCUDA();
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
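// MAGMA potri fills only the requested triangle; mirror it across the diagonal so the full symmetric inverse is returned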
if (uplo == 'U') {
hipLaunchKernelGGL(( THCTensor_(copyUpperSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
} else {
hipLaunchKernelGGL(( THCTensor_(copyLowerSymmetric)), dim3(blocks), dim3(threads), 0, stream, input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
| fa8d238b3f5dcdbaab5e8282f6e161c0889a5e01.cu | #ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "THC/generic/THCTensorMathMagma.cu"
#else
#if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE)
#ifdef USE_MAGMA
static void THCTensor_(copyArray1d)(THCState *state, THCTensor *self, scalar_t *src, int k)
{
int64_t size[1] = { k };
int64_t stride[1] = { 1 };
THCTensor_(resizeNd)(state, self, 1, size, stride);
size_t len = k * sizeof(scalar_t);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyArray2d)(THCState *state, THCTensor *self, scalar_t *src, int m, int n)
{
int64_t size[2] = { m, n };
int64_t stride[2] = { 1, m };
THCTensor_(resizeNd)(state, self, 2, size, stride);
size_t len = m * n * sizeof(scalar_t);
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(self)) + self->storage_offset(), src, len, cudaMemcpyHostToDevice));
}
static void THCTensor_(copyTensor2d)(THCState *state, scalar_t *dst, THCTensor *self)
{
THAssert(self->dim() == 2);
size_t len = THCTensor_(nElement)(state, self)*sizeof(scalar_t);
THCTensor *temp = THCTensor_(newTranspose)(state, self, 0, 1);
THCTensor *selfc = THCTensor_(newContiguous)(state, temp);
THCudaCheck(cudaMemcpy(dst, THCStorage_(data)(state, THTensor_getStoragePtr(selfc)) + selfc->storage_offset(), len, cudaMemcpyDeviceToHost));
THCTensor_(free)(state, temp);
THCTensor_(free)(state, selfc);
}
#endif // USE_MAGMA
static THCTensor* THCTensor_(newColumnMajor)(THCState *state, THCTensor *self, THCTensor *src)
{
THAssert(src->dim() == 2);
if (self == src && self->stride(0) == 1 && self->stride(1) == self->size(0))
{
THCTensor_(retain)(state, self);
return self;
}
if (self == src)
self = THCTensor_(new)(state);
else
THCTensor_(retain)(state, self);
int64_t size[2] = { src->size(0), src->size(1) };
int64_t stride[2] = { 1, src->size(0) };
THCTensor_(resizeNd)(state, self, 2, size, stride);
THCTensor_(copy)(state, self, src);
return self;
}
void THCTensor_(gels)(THCState *state, THCTensor *rb_, THCTensor *ra_, THCTensor *b_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 1, "A should be (non-empty) 2 dimensional");
THArgCheck(!b_->is_empty() && b_->dim() == 2, 1, "b should be (non-empty) 2 dimensional");
TORCH_CHECK(a_->size(0) == b_->size(0), "Expected A and b to have same size "
"at dim 0, but A has ", a_->size(0), " rows and B has ", b_->size(0), " rows");
THArgCheck(a_->size(0) >= a_->size(1), 2, "Expected A with shape (m x n) to have "
"m >= n. The case for m < n is not implemented yet.");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
THCTensor *b = THCTensor_(newColumnMajor)(state, rb_, b_);
scalar_t *a_data = THCTensor_(data)(state, a);
scalar_t *b_data = THCTensor_(data)(state, b);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t nrhs = b->size(1);
scalar_t wkopt;
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, &wkopt, -1, &info);
#endif
scalar_t *hwork = th_magma_malloc_pinned<scalar_t>((size_t)wkopt);
#if defined(THC_REAL_IS_FLOAT)
magma_sgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#else
magma_dgels_gpu(MagmaNoTrans, m, n, nrhs, a_data, m, b_data, m, hwork, (int)wkopt, &info);
#endif
magma_free_pinned(hwork);
if (info != 0)
THError("MAGMA gels : Argument %d : illegal value", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(freeCopyTo)(state, b, rb_);
#else
THError(NoMagma(gels));
#endif
}
void THCTensor_(geev)(THCState *state, THCTensor *re_, THCTensor *rv_, THCTensor *a_, bool eigenvectors)
{
#ifdef USE_MAGMA
char jobvrs = eigenvectors ? 'V' : 'N';
THArgCheck(a_->dim() == 2, 3, "A should be 2 dimensional");
THArgCheck(a_->size(0) == a_->size(1), 3, "A should be square");
magma_vec_t jobvr = jobvrs == 'N' ? MagmaNoVec : MagmaVec;
int64_t n = a_->size(0);
scalar_t *a_data = th_magma_malloc_pinned<scalar_t>(n * n);
THCTensor_(copyTensor2d)(state, a_data, a_);
scalar_t *wr = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *wi = th_magma_malloc_pinned<scalar_t>(n);
scalar_t *vr_data = NULL;
int64_t ldvr = 1;
if (jobvr == MagmaVec)
{
vr_data = th_magma_malloc_pinned<scalar_t>(n * n);
ldvr = n;
}
scalar_t *work_data = nullptr;
if (n > 0) {
int info;
scalar_t wkopt;
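// first call with lwork = -1 is a workspace query; the optimal size comes back in wkopt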
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, &wkopt, -1, &info);
#endif
int lwork = (int) wkopt;
work_data = th_magma_malloc_pinned<scalar_t>(lwork);
#if defined(THC_REAL_IS_FLOAT)
magma_sgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#else
magma_dgeev(MagmaNoVec, jobvr, n, a_data, n, wr, wi, NULL, 1, vr_data, ldvr, work_data, lwork, &info);
#endif
if (info > 0)
THError("MAGMA geev : Failed to converge. %d off-diagonal elements of an didn't converge to zero", info);
else if (info < 0)
THError("MAGMA geev : Argument %d : illegal value", -info);
}
{
THCTensor_(resize2d)(state, re_, 2, n);
THCTensor *re = THCTensor_(newContiguous)(state, re_);
if (n > 0) {
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset(), wr, n*sizeof(scalar_t), cudaMemcpyHostToDevice));
THCudaCheck(cudaMemcpy(THCStorage_(data)(state, THTensor_getStoragePtr(re)) + re->storage_offset() + n, wi, n*sizeof(scalar_t), cudaMemcpyHostToDevice));
}
THCTensor_(freeCopyTo)(state, re, re_);
THCTensor_(transpose)(state, re_, NULL, 0, 1);
}
if (jobvr == MagmaVec)
THCTensor_(copyArray2d)(state, rv_, vr_data, n, n);
magma_free_pinned(work_data);
magma_free_pinned(vr_data);
magma_free_pinned(wi);
magma_free_pinned(wr);
magma_free_pinned(a_data);
#else
THError(NoMagma(geev));
#endif
}
__global__ void THCTensor_(copyUpperSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r > c) {
input[idx] = input[r*n + c];
}
}
}
__global__ void THCTensor_(copyLowerSymmetric)(scalar_t *input, int n, int len)
{
for (int idx = threadIdx.x + blockIdx.x * blockDim.x; idx < len; idx += 65535) {
const int r = idx % n;
const int c = idx / n;
if (r < c) {
input[idx] = input[r*n + c];
}
}
}
void THCTensor_(potri)(THCState *state, THCTensor *ra_, THCTensor *a, bool upper)
{
char uplo = upper ? 'U' : 'L';
#ifdef USE_MAGMA
THArgCheck(!a->is_empty() && a->dim() == 2, 2, "A should be non-empty 2 dimensional");
THArgCheck(a->size(0) == a->size(1), 2, "A should be square");
int64_t n = a->size(0);
magma_uplo_t ul = uplo == 'U' ? MagmaUpper : MagmaLower;
THCTensor *input = THCTensor_(newColumnMajor)(state, ra_, a);
scalar_t *input_data = THCTensor_(data)(state, input);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_spotri_gpu(ul, n, input_data, n, &info);
#else
magma_dpotri_gpu(ul, n, input_data, n, &info);
#endif
if (info > 0)
THError("MAGMA potri : A(%d,%d) is 0, A cannot be factorized", info, info);
else if (info < 0)
THError("MAGMA potri : Argument %d : illegal value", -info);
cudaStream_t stream = c10::cuda::getCurrentCUDAStream();
const int len = n*n;
dim3 blocks(std::min(DIVUP(len, 128), 65535));
dim3 threads(128);
if (uplo == 'U') {
THCTensor_(copyUpperSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
} else {
THCTensor_(copyLowerSymmetric)<<<blocks, threads, 0, stream>>>(input_data, n, len);
}
THCTensor_(freeCopyTo)(state, input, ra_);
#else
THError(NoMagma(potri));
#endif
}
void THCTensor_(geqrf)(THCState *state, THCTensor *ra_, THCTensor *rtau_, THCTensor *a_)
{
#ifdef USE_MAGMA
THArgCheck(!a_->is_empty() && a_->dim() == 2, 2, "A should be non-empty 2 dimensional");
THCTensor *a = THCTensor_(newColumnMajor)(state, ra_, a_);
int64_t m = a->size(0);
int64_t n = a->size(1);
int64_t k = (m < n ? m : n);
#if defined(THC_REAL_IS_FLOAT)
int64_t nb = magma_get_sgeqrf_nb(m, n);
#else
int64_t nb = magma_get_dgeqrf_nb(m, n);
#endif
scalar_t *rtau_data = th_magma_malloc_pinned<scalar_t>(k);
scalar_t *a_data = THCTensor_(data)(state, a);
int info;
#if defined(THC_REAL_IS_FLOAT)
magma_sgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#else
magma_dgeqrf2_gpu(m, n, a_data, m, rtau_data, &info);
#endif
if (info != 0)
THError("MAGMA geqrf2 : Argument %d : illegal value.", -info);
THCTensor_(freeCopyTo)(state, a, ra_);
THCTensor_(copyArray1d)(state, rtau_, rtau_data, k);
magma_free_pinned(rtau_data);
#else
THError(NoMagma(geqrf));
#endif
}
#endif
#endif
|
522794f0b5f766bad81ce36252b1f4790951a654.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_minus_2_a;
int xdim0_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_minus_2_a;
int ydim0_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_minus_2_a;
int xdim1_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_minus_2_a;
int ydim1_update_halo_kernel3_minus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_minus_2_a * (y) + \
xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_minus_2_a * (y) + \
xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel3_minus_2_a_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(2, 0, 0)]);
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(2, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_minus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a *
ydim0_update_halo_kernel3_minus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a *
ydim1_update_halo_kernel3_minus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_minus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 109))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(109, "update_halo_kernel3_minus_2_a");
OPS_kernels[109].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_minus_2_a_h ||
ydim0 != ydim0_update_halo_kernel3_minus_2_a_h ||
xdim1 != xdim1_update_halo_kernel3_minus_2_a_h ||
ydim1 != ydim1_update_halo_kernel3_minus_2_a_h) {
hipMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_a, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_minus_2_a_h = xdim0;
hipMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_a, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_minus_2_a_h = ydim0;
hipMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_a, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_minus_2_a_h = xdim1;
hipMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_a, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_minus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
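// stage the small fields[] array in the OPS constants buffer and copy it to the device before launch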
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[109].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_2_a), dim3(grid), dim3(tblock), 0, 0,
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[109].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[109].mpi_time += t2 - t1;
OPS_kernels[109].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[109].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
| 522794f0b5f766bad81ce36252b1f4790951a654.cu | //
// auto-generated by ops.py
//
__constant__ int xdim0_update_halo_kernel3_minus_2_a;
int xdim0_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int ydim0_update_halo_kernel3_minus_2_a;
int ydim0_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int xdim1_update_halo_kernel3_minus_2_a;
int xdim1_update_halo_kernel3_minus_2_a_h = -1;
__constant__ int ydim1_update_halo_kernel3_minus_2_a;
int ydim1_update_halo_kernel3_minus_2_a_h = -1;
#undef OPS_ACC0
#undef OPS_ACC1
#define OPS_ACC0(x, y, z) \
(x + xdim0_update_halo_kernel3_minus_2_a * (y) + \
xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a * \
(z))
#define OPS_ACC1(x, y, z) \
(x + xdim1_update_halo_kernel3_minus_2_a * (y) + \
xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a * \
(z))
// user function
__device__
inline void
update_halo_kernel3_minus_2_a_gpu(double *vol_flux_x, double *mass_flux_x,
const int *fields) {
if (fields[FIELD_VOL_FLUX_X] == 1)
vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(2, 0, 0)]);
if (fields[FIELD_MASS_FLUX_X] == 1)
mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(2, 0, 0)]);
}
#undef OPS_ACC0
#undef OPS_ACC1
__global__ void ops_update_halo_kernel3_minus_2_a(double *__restrict arg0,
double *__restrict arg1,
const int *__restrict arg2,
int size0, int size1,
int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a +
idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a *
ydim0_update_halo_kernel3_minus_2_a;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a +
idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a *
ydim1_update_halo_kernel3_minus_2_a;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
update_halo_kernel3_minus_2_a_gpu(arg0, arg1, arg2);
}
}
// host stub function
void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name,
ops_block block, int dim,
int *range, ops_arg arg0,
ops_arg arg1, ops_arg arg2) {
// Timing
double t1, t2, c1, c2;
ops_arg args[3] = {arg0, arg1, arg2};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 3, range, 109))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(109, "update_halo_kernel3_minus_2_a");
OPS_kernels[109].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != xdim0_update_halo_kernel3_minus_2_a_h ||
ydim0 != ydim0_update_halo_kernel3_minus_2_a_h ||
xdim1 != xdim1_update_halo_kernel3_minus_2_a_h ||
ydim1 != ydim1_update_halo_kernel3_minus_2_a_h) {
cudaMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_a, &xdim0,
sizeof(int));
xdim0_update_halo_kernel3_minus_2_a_h = xdim0;
cudaMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_a, &ydim0,
sizeof(int));
ydim0_update_halo_kernel3_minus_2_a_h = ydim0;
cudaMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_a, &xdim1,
sizeof(int));
xdim1_update_halo_kernel3_minus_2_a_h = xdim1;
cudaMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_a, &ydim1,
sizeof(int));
ydim1_update_halo_kernel3_minus_2_a_h = ydim1;
}
int *arg2h = (int *)arg2.data;
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d = 0; d < NUM_FIELDS; d++)
((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
char *p_a[3];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args, 3, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[109].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_update_halo_kernel3_minus_2_a<<<grid, tblock>>>(
(double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size,
z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[109].time += t1 - t2;
}
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[109].mpi_time += t2 - t1;
OPS_kernels[109].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[109].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
|
0783684de30dd118c0ba5d9ef1f62361bd29cea8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include "exercise2.cuh"
const size_t Rows = 150;
const size_t Cols = 200;
const size_t BlockSize = 8;
void exercise2()
{
int *dMatrix;
size_t pitchInBytes = 0;
checkCudaErrors(hipMallocPitch((void**)&dMatrix, &pitchInBytes, Cols * sizeof(int), Rows));
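// hipMallocPitch returns the row pitch in bytes; convert it to int elements for indexing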
size_t pitch = pitchInBytes / sizeof(int);
dim3 grid = dim3(getNumberOfParts(Rows, BlockSize), getNumberOfParts(Cols, BlockSize));
dim3 block = dim3(BlockSize, BlockSize);
fill << <grid, block >> > (dMatrix, Rows, Cols, pitch);
checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix");
increment << <grid, block >> > (dMatrix, Rows, Cols, pitch);
checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix");
int *expectedMatrix = new int[Rows * Cols];
for (size_t i = 0; i < Rows * Cols; i++)
expectedMatrix[i] = i + 1;
int *matrix = new int[pitch * Rows];
checkCudaErrors(hipMemcpy2D(matrix, pitchInBytes, dMatrix, pitchInBytes, Cols * sizeof(int), Rows, hipMemcpyDeviceToHost));
checkHostMatrix(matrix, pitchInBytes, Rows, Cols, "%-3d ", "matrix");
delete[] matrix;
delete[] expectedMatrix;
hipFree(dMatrix);
}
__global__ void fill(int* matrix, size_t rows, size_t cols, size_t pitch)
{
int row = blockIdx.x * BlockSize + threadIdx.x;
int col = blockIdx.y * BlockSize + threadIdx.y;
if (row >= rows || col >= cols)
{
return;
}
matrix[row * pitch + col] = col * rows + row;
}
__global__ void increment(int* matrix, size_t rows, size_t cols, size_t pitch)
{
int row = blockIdx.x * BlockSize + threadIdx.x;
int col = blockIdx.y * BlockSize + threadIdx.y;
if (row >= rows || col >= cols)
{
return;
}
matrix[row * pitch + col]++;
} | 0783684de30dd118c0ba5d9ef1f62361bd29cea8.cu | #include <cudaDefs.h>
#include "exercise2.cuh"
const size_t Rows = 150;
const size_t Cols = 200;
const size_t BlockSize = 8;
void exercise2()
{
int *dMatrix;
size_t pitchInBytes = 0;
checkCudaErrors(cudaMallocPitch((void**)&dMatrix, &pitchInBytes, Cols * sizeof(int), Rows));
size_t pitch = pitchInBytes / sizeof(int);
dim3 grid = dim3(getNumberOfParts(Rows, BlockSize), getNumberOfParts(Cols, BlockSize));
dim3 block = dim3(BlockSize, BlockSize);
fill << <grid, block >> > (dMatrix, Rows, Cols, pitch);
checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix");
increment << <grid, block >> > (dMatrix, Rows, Cols, pitch);
checkDeviceMatrix(dMatrix, pitchInBytes, Rows, Cols, "%-3d ", "dMatrix");
int *expectedMatrix = new int[Rows * Cols];
for (size_t i = 0; i < Rows * Cols; i++)
expectedMatrix[i] = i + 1;
int *matrix = new int[pitch * Rows];
checkCudaErrors(cudaMemcpy2D(matrix, pitchInBytes, dMatrix, pitchInBytes, Cols * sizeof(int), Rows, cudaMemcpyDeviceToHost));
checkHostMatrix(matrix, pitchInBytes, Rows, Cols, "%-3d ", "matrix");
delete[] matrix;
delete[] expectedMatrix;
cudaFree(dMatrix);
}
__global__ void fill(int* matrix, size_t rows, size_t cols, size_t pitch)
{
int row = blockIdx.x * BlockSize + threadIdx.x;
int col = blockIdx.y * BlockSize + threadIdx.y;
if (row >= rows || col >= cols)
{
return;
}
matrix[row * pitch + col] = col * rows + row;
}
__global__ void increment(int* matrix, size_t rows, size_t cols, size_t pitch)
{
int row = blockIdx.x * BlockSize + threadIdx.x;
int col = blockIdx.y * BlockSize + threadIdx.y;
if (row >= rows || col >= cols)
{
return;
}
matrix[row * pitch + col]++;
} |
7ff199ff87fbd156bc03bd931c68472aff493c43.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#define N 1024
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N];
int temp = x[threadIdx.x] * y[threadIdx.x];
s_tmp[threadIdx.x] = temp; // store each thread's product in shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N ; i++) sum += s_tmp[i];
*r = sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
hipMalloc((void **)&d_a, size);
hipMalloc((void **)&d_b, size);
hipMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// hipMemcpyHostToDevice selects the copy direction: from host memory to device memory
hipMemcpy(d_a, a, size, hipMemcpyHostToDevice);
hipMemcpy(d_b, b, size, hipMemcpyHostToDevice);
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
hipLaunchKernelGGL(( dotVecs), dim3(1),dim3(N), 0, 0, d_a, d_b, d_c);
// Copy results from device to host
// hipMemcpy blocks CPU until Kernels finish execution
hipMemcpy(c, d_c, sizeof(int), hipMemcpyDeviceToHost);
printf("%d\n",*c);
// needs hipFree to deallocate device pointers
hipFree(d_a); hipFree(d_b); hipFree(d_c);
free(a); free(b); free(c);
return 0;
}
| 7ff199ff87fbd156bc03bd931c68472aff493c43.cu | #include <stdio.h>
#include <stdlib.h>
#define N 1024
void fill_ints(int* a, int size){
for(int i =0; i<size; i++)
a[i]=i;
}
__global__ void dotVecs(int *x, int *y, int *r){
__shared__ int s_tmp[N];
int temp = x[threadIdx.x] * y[threadIdx.x];
s_tmp[threadIdx.x] = temp; // store each thread's product in shared memory
__syncthreads();
// Thread 0 performs the reduction
if(threadIdx.x == 0){
int sum = 0;
for(int i = 0 ; i < N ; i++) sum += s_tmp[i];
*r = sum;
}
}
int main(void){
int *a, *b, *c; // host pointers
int *d_a, *d_b, *d_c; // device pointers
int size = N * sizeof(int);
// Alloc space for device copies of a, b, c
cudaMalloc((void **)&d_a, size);
cudaMalloc((void **)&d_b, size);
cudaMalloc((void **)&d_c, sizeof(int));
a = (int *)malloc(size);
fill_ints(a, N); // Alloc space host, random initialization
b = (int *)malloc(size);
fill_ints(b, N);
c = (int *)malloc(sizeof(int));
// Copy data from host to device memory
// cudaMemcpyHostToDevice selects the copy direction: from host memory to device memory
cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice);
// Launch kernel to add two vector with N threads and 1 block
// Kernel calls are asynchronous
dotVecs<<<1,N>>>(d_a, d_b, d_c);
// Copy results from device to host
// cudaMemcpy blocks CPU until Kernels finish execution
cudaMemcpy(c, d_c, sizeof(int), cudaMemcpyDeviceToHost);
printf("%d\n",*c);
// needs cudaFree to deallocate device pointers
cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
free(a); free(b); free(c);
return 0;
}
|
991c4aa7cdaa631d5b404ad566221a2e5747628d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "saxpy_baseline.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE);
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float a = 2;
clock_t *timer_vals = NULL;
hipMalloc(&timer_vals, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(saxpy_baseline, dim3(gridBlock), dim3(threadBlock), 0, 0, y, x, a, timer_vals);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(saxpy_baseline, dim3(gridBlock), dim3(threadBlock), 0, 0, y, x, a, timer_vals);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(saxpy_baseline, dim3(gridBlock), dim3(threadBlock), 0, 0, y, x, a, timer_vals);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 991c4aa7cdaa631d5b404ad566221a2e5747628d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "saxpy_baseline.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE);
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float a = 2;
clock_t *timer_vals = NULL;
cudaMalloc(&timer_vals, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
saxpy_baseline<<<gridBlock,threadBlock>>>(y,x,a,timer_vals);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
saxpy_baseline<<<gridBlock,threadBlock>>>(y,x,a,timer_vals);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
saxpy_baseline<<<gridBlock,threadBlock>>>(y,x,a,timer_vals);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
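// Editorial sketch, not part of the record above: the steady_clock loop times kernel
// *launches*, which are asynchronous, so the measured interval can end before the queued
// work does. Bracketing the launches with CUDA events measures device execution time
// instead. The helper assumes the same saxpy_baseline signature and launch configuration
// used above.
static float timeLaunchesWithEvents(float* y, float* x, float a, clock_t* timer_vals,
                                    dim3 gridBlock, dim3 threadBlock, int iters) {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start);
    for (int i = 0; i < iters; ++i) {
        saxpy_baseline<<<gridBlock, threadBlock>>>(y, x, a, timer_vals);
    }
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);   // wait until every queued launch has finished
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return ms;
}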
98065dfef7874119e885aeadda40ed87c45d2bc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// NOTE: the original snippet begins mid-kernel; the macro values and kernel signature
// below are assumed reconstructions (chosen to match the "32x32 shared memory, 16x16
// thread blocks" comment) so that the body compiles.
#ifndef KERNEL_RADIUS
#define KERNEL_RADIUS 8
#endif
#ifndef TILE_W
#define TILE_W 16
#endif
#ifndef IMUL
#define IMUL(a, b) __mul24(a, b)
#endif
__global__ void convolutionGPU(float* d_Result, const float* d_Data, const float* d_Kernel, int dataW, int dataH)
{
__shared__ float data[TILE_W + KERNEL_RADIUS * 2][TILE_W + KERNEL_RADIUS * 2];
// global mem address of this thread
const int gLoc = threadIdx.x +
IMUL(blockIdx.x, blockDim.x) +
IMUL(threadIdx.y, dataW) +
IMUL(blockIdx.y, blockDim.y) * dataW;
// load cache (32x32 shared memory, 16x16 thread blocks)
// each thread loads four values from global memory into shared mem
// if in image area, get value in global mem, else 0
int x, y; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + IMUL(blockIdx.x, blockDim.x);
const int y0 = threadIdx.y + IMUL(blockIdx.y, blockDim.y);
// case1: upper left
x = x0 - KERNEL_RADIUS;
y = y0 - KERNEL_RADIUS;
if ( x < 0 || y < 0 )
data[threadIdx.x][threadIdx.y] = 0;
else
data[threadIdx.x][threadIdx.y] = d_Data[ gLoc - KERNEL_RADIUS - IMUL(dataW, KERNEL_RADIUS)];
// case2: upper right
x = x0 + KERNEL_RADIUS;
y = y0 - KERNEL_RADIUS;
if ( x > dataW-1 || y < 0 )
data[threadIdx.x + blockDim.x][threadIdx.y] = 0;
else
data[threadIdx.x + blockDim.x][threadIdx.y] = d_Data[gLoc + KERNEL_RADIUS - IMUL(dataW, KERNEL_RADIUS)];
// case3: lower left
x = x0 - KERNEL_RADIUS;
y = y0 + KERNEL_RADIUS;
if (x < 0 || y > dataH-1)
data[threadIdx.x][threadIdx.y + blockDim.y] = 0;
else
data[threadIdx.x][threadIdx.y + blockDim.y] = d_Data[gLoc - KERNEL_RADIUS + IMUL(dataW, KERNEL_RADIUS)];
// case4: lower right
x = x0 + KERNEL_RADIUS;
y = y0 + KERNEL_RADIUS;
if ( x > dataW-1 || y > dataH-1)
data[threadIdx.x + blockDim.x][threadIdx.y + blockDim.y] = 0;
else
data[threadIdx.x + blockDim.x][threadIdx.y + blockDim.y] = d_Data[gLoc + KERNEL_RADIUS + IMUL(dataW, KERNEL_RADIUS)];
__syncthreads();
// convolution
float sum = 0;
x = KERNEL_RADIUS + threadIdx.x;
y = KERNEL_RADIUS + threadIdx.y;
for (int i = -KERNEL_RADIUS; i <= KERNEL_RADIUS; i++)
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
sum += data[x + i][y + j] * d_Kernel[KERNEL_RADIUS + j] * d_Kernel[KERNEL_RADIUS + i];
d_Result[gLoc] = sum;
}
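// A typical launch for this tiled kernel uses one thread per output pixel in a
// TILE_W x TILE_W block, so the padded tile in shared memory covers the block plus its
// KERNEL_RADIUS halo on every side. Sketch (editorial; assumes the reconstructed
// convolutionGPU signature above):
//   dim3 block(TILE_W, TILE_W);
//   dim3 grid((dataW + TILE_W - 1) / TILE_W, (dataH + TILE_W - 1) / TILE_W);
//   hipLaunchKernelGGL(convolutionGPU, grid, block, 0, 0,
//                      d_Result, d_Data, d_Kernel, dataW, dataH);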
| 98065dfef7874119e885aeadda40ed87c45d2bc2.cu | // NOTE: the original snippet begins mid-kernel; the macro values and kernel signature
// below are assumed reconstructions (chosen to match the "32x32 shared memory, 16x16
// thread blocks" comment) so that the body compiles.
#ifndef KERNEL_RADIUS
#define KERNEL_RADIUS 8
#endif
#ifndef TILE_W
#define TILE_W 16
#endif
#ifndef IMUL
#define IMUL(a, b) __mul24(a, b)
#endif
__global__ void convolutionGPU(float* d_Result, const float* d_Data, const float* d_Kernel, int dataW, int dataH)
{
__shared__ float data[TILE_W + KERNEL_RADIUS * 2][TILE_W + KERNEL_RADIUS * 2];
// global mem address of this thread
const int gLoc = threadIdx.x +
IMUL(blockIdx.x, blockDim.x) +
IMUL(threadIdx.y, dataW) +
IMUL(blockIdx.y, blockDim.y) * dataW;
// load cache (32x32 shared memory, 16x16 thread blocks)
// each thread loads four values from global memory into shared mem
// if in image area, get value in global mem, else 0
int x, y; // image based coordinate
// original image based coordinate
const int x0 = threadIdx.x + IMUL(blockIdx.x, blockDim.x);
const int y0 = threadIdx.y + IMUL(blockIdx.y, blockDim.y);
// case1: upper left
x = x0 - KERNEL_RADIUS;
y = y0 - KERNEL_RADIUS;
if ( x < 0 || y < 0 )
data[threadIdx.x][threadIdx.y] = 0;
else
data[threadIdx.x][threadIdx.y] = d_Data[ gLoc - KERNEL_RADIUS - IMUL(dataW, KERNEL_RADIUS)];
// case2: upper right
x = x0 + KERNEL_RADIUS;
y = y0 - KERNEL_RADIUS;
if ( x > dataW-1 || y < 0 )
data[threadIdx.x + blockDim.x][threadIdx.y] = 0;
else
data[threadIdx.x + blockDim.x][threadIdx.y] = d_Data[gLoc + KERNEL_RADIUS - IMUL(dataW, KERNEL_RADIUS)];
// case3: lower left
x = x0 - KERNEL_RADIUS;
y = y0 + KERNEL_RADIUS;
if (x < 0 || y > dataH-1)
data[threadIdx.x][threadIdx.y + blockDim.y] = 0;
else
data[threadIdx.x][threadIdx.y + blockDim.y] = d_Data[gLoc - KERNEL_RADIUS + IMUL(dataW, KERNEL_RADIUS)];
// case4: lower right
x = x0 + KERNEL_RADIUS;
y = y0 + KERNEL_RADIUS;
if ( x > dataW-1 || y > dataH-1)
data[threadIdx.x + blockDim.x][threadIdx.y + blockDim.y] = 0;
else
data[threadIdx.x + blockDim.x][threadIdx.y + blockDim.y] = d_Data[gLoc + KERNEL_RADIUS + IMUL(dataW, KERNEL_RADIUS)];
__syncthreads();
// convolution
float sum = 0;
x = KERNEL_RADIUS + threadIdx.x;
y = KERNEL_RADIUS + threadIdx.y;
for (int i = -KERNEL_RADIUS; i <= KERNEL_RADIUS; i++)
for (int j = -KERNEL_RADIUS; j <= KERNEL_RADIUS; j++)
sum += data[x + i][y + j] * d_Kernel[KERNEL_RADIUS + j] * d_Kernel[KERNEL_RADIUS + i];
d_Result[gLoc] = sum;
}
|
6a3086bb2f0cbb409b6ac8444eb6d663916ac8fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
struct Point {
double* x;
double* y;
double* z;
};
struct Ref {
Point pos;
Point dir;
double* distance;
};
struct View {
int size;
double3* pos;
double3* dir;
double* distance;
__device__ Ref operator[](int i) const {
double3* newpos = pos + i;
double3* newdir = dir + i;
return {{&(newpos->x), &(newpos->y), &(newpos->z)},
{&(newdir->x), &(newdir->y), &(newdir->z)},
distance + i};
}
};
__device__ inline void move_impl(const Ref& ref) {
const double nextdist = *ref.distance;
*ref.pos.x += *ref.dir.x * nextdist;
*ref.pos.y += *ref.dir.y * nextdist;
*ref.pos.z += *ref.dir.z * nextdist;
}
__global__ void move(View view) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < view.size) {
move_impl(view[idx]);
}
}
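// Usage sketch (editorial; buffer names and sizes are illustrative, not from the original
// file): View is an SoA handle over device buffers, and operator[] hands each thread a
// Ref of raw pointers into them.
//   View v{n, d_pos, d_dir, d_distance};          // double3*, double3*, double*
//   int threads = 256;
//   int blocks = (n + threads - 1) / threads;
//   hipLaunchKernelGGL(move, dim3(blocks), dim3(threads), 0, 0, v);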
| 6a3086bb2f0cbb409b6ac8444eb6d663916ac8fd.cu | struct Point {
double* x;
double* y;
double* z;
};
struct Ref {
Point pos;
Point dir;
double* distance;
};
struct View {
int size;
double3* pos;
double3* dir;
double* distance;
__device__ Ref operator[](int i) const {
double3* newpos = pos + i;
double3* newdir = dir + i;
return {{&(newpos->x), &(newpos->y), &(newpos->z)},
{&(newdir->x), &(newdir->y), &(newdir->z)},
distance + i};
}
};
__device__ inline void move_impl(const Ref& ref) {
const double nextdist = *ref.distance;
*ref.pos.x += *ref.dir.x * nextdist;
*ref.pos.y += *ref.dir.y * nextdist;
*ref.pos.z += *ref.dir.z * nextdist;
}
__global__ void move(View view) {
const int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < view.size) {
move_impl(view[idx]);
}
}
|
3fe00255a661e4e05b7b6cdb8e618af436d167b0.hip | // !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "BondedGroupData.cuh"
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file BondedGroupData.cu
\brief Implements the helper functions (GPU version) for updating the GPU bonded group tables
*/
namespace hoomd
{
template<unsigned int group_size, typename group_t>
__global__ void gpu_count_groups_kernel(const unsigned int n_groups,
const group_t* d_group_table,
const unsigned int* d_rtag,
unsigned int* d_scratch_idx,
unsigned int* d_scratch_g,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_groups)
return;
group_t g = d_group_table[group_idx];
for (unsigned int i = 0; i < group_size; ++i)
{
unsigned int tag_i = g.tag[i];
unsigned int pidx_i = d_rtag[tag_i];
// detect incomplete groups
if (pidx_i == NOT_LOCAL)
atomicMax(d_condition, next_flag + 1 + group_idx);
// write out group_idx to temporary array
d_scratch_g[i * n_groups + group_idx] = group_idx;
d_scratch_idx[i * n_groups + group_idx] = pidx_i;
// atomically increment number of groups
unsigned int n = 0;
if (pidx_i != NOT_LOCAL)
n = atomicInc(&d_n_groups[pidx_i], 0xffffffff);
if (n >= max_n_groups)
// set flag to indicate we need to grow the output array
atomicMax(d_condition, next_flag);
}
}
template<unsigned int group_size, typename group_t>
__global__ void gpu_group_scatter_kernel(unsigned int n_scratch,
const unsigned int* d_scratch_g,
const unsigned int* d_scratch_idx,
const unsigned int* d_offset,
const group_t* d_members,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
unsigned int pidx_group_table_pitch,
bool has_type_mapping)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_scratch)
return;
unsigned int pidx = d_scratch_idx[i];
unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx;
// load group
unsigned int group_idx = d_scratch_g[i];
group_t g = d_members[group_idx];
// construct compact group representation, excluding particle pidx
group_t p;
if (has_type_mapping)
{
// last element = group type
p.idx[group_size - 1] = d_group_typeval[group_idx].type;
}
else
{
// last element = group index
p.idx[group_size - 1] = group_idx;
}
unsigned int j = 0;
// position in group
unsigned int gpos = 0;
for (unsigned int k = 0; k < group_size; ++k)
{
unsigned int tag_k = g.tag[k];
unsigned int pidx_k = d_rtag[tag_k];
if (pidx_k == pidx)
{
gpos = k;
continue;
}
p.idx[j++] = pidx_k;
}
d_pidx_group_table[offset] = p;
d_pidx_gpos_table[offset] = gpos;
}
template<unsigned int group_size, typename group_t>
void gpu_update_group_table(const unsigned int n_groups,
const unsigned int N,
const group_t* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc)
{
// construct scratch table by expanding the group table by particle index
unsigned int block_size = 256;
unsigned n_blocks = n_groups / block_size + 1;
// reset number of groups
hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>),
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups,
d_group_table,
d_rtag,
d_scratch_idx,
d_scratch_g,
d_n_groups,
max_n_groups,
d_condition,
next_flag);
// read back flag
hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (!(flag >= next_flag) && n_groups)
{
// we are good, fill group table
// sort groups by particle idx
thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx);
thrust::device_ptr<unsigned int> scratch_g(d_scratch_g);
#ifdef __HIP_PLATFORM_HCC__
thrust::sort_by_key(thrust::hip::par(alloc),
#else
thrust::sort_by_key(thrust::hip::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
scratch_g);
// perform a segmented scan of d_scratch_idx
thrust::device_ptr<unsigned int> offsets(d_offsets);
thrust::constant_iterator<unsigned int> const_it(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
const_it,
offsets);
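// the exclusive scan of constant 1s, keyed by the sorted particle index, gives each
// (particle, group) entry its slot within that particle's row; the scatter kernel
// below turns this into the offset d_offset[i] * pidx_group_table_pitch + pidx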
// scatter groups to destinations
block_size = 256;
n_blocks = (group_size * n_groups) / block_size + 1;
hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>,
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups * group_size,
d_scratch_g,
d_scratch_idx,
d_offsets,
d_group_table,
d_group_typeval,
d_rtag,
d_pidx_group_table,
d_pidx_gpos_table,
pidx_group_table_pitch,
has_type_mapping);
}
}
/*
* Explicit template instantiations
*/
//! BondData
template void gpu_update_group_table<2>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<2>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<2>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! AngleData
template void gpu_update_group_table<3>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<3>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<3>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! DihedralData and ImproperData
template void gpu_update_group_table<4>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<4>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<4>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! MeshTriangleData
template void gpu_update_group_table<6>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<6>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<6>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
} // end namespace hoomd
| 3fe00255a661e4e05b7b6cdb8e618af436d167b0.cu | // Copyright (c) 2009-2022 The Regents of the University of Michigan.
// Part of HOOMD-blue, released under the BSD 3-Clause License.
#include "BondedGroupData.cuh"
#include "ParticleData.cuh"
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wconversion"
#include <hip/hip_runtime.h>
#include <thrust/device_ptr.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/constant_iterator.h>
#include <thrust/scan.h>
#include <thrust/sort.h>
#pragma GCC diagnostic pop
/*! \file BondedGroupData.cu
\brief Implements the helper functions (GPU version) for updating the GPU bonded group tables
*/
namespace hoomd
{
template<unsigned int group_size, typename group_t>
__global__ void gpu_count_groups_kernel(const unsigned int n_groups,
const group_t* d_group_table,
const unsigned int* d_rtag,
unsigned int* d_scratch_idx,
unsigned int* d_scratch_g,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag)
{
unsigned int group_idx = blockIdx.x * blockDim.x + threadIdx.x;
if (group_idx >= n_groups)
return;
group_t g = d_group_table[group_idx];
for (unsigned int i = 0; i < group_size; ++i)
{
unsigned int tag_i = g.tag[i];
unsigned int pidx_i = d_rtag[tag_i];
// detect incomplete groups
if (pidx_i == NOT_LOCAL)
atomicMax(d_condition, next_flag + 1 + group_idx);
// write out group_idx to temporary array
d_scratch_g[i * n_groups + group_idx] = group_idx;
d_scratch_idx[i * n_groups + group_idx] = pidx_i;
// atomically increment number of groups
unsigned int n = 0;
if (pidx_i != NOT_LOCAL)
n = atomicInc(&d_n_groups[pidx_i], 0xffffffff);
if (n >= max_n_groups)
// set flag to indicate we need to grow the output array
atomicMax(d_condition, next_flag);
}
}
template<unsigned int group_size, typename group_t>
__global__ void gpu_group_scatter_kernel(unsigned int n_scratch,
const unsigned int* d_scratch_g,
const unsigned int* d_scratch_idx,
const unsigned int* d_offset,
const group_t* d_members,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
unsigned int pidx_group_table_pitch,
bool has_type_mapping)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= n_scratch)
return;
unsigned int pidx = d_scratch_idx[i];
unsigned int offset = d_offset[i] * pidx_group_table_pitch + pidx;
// load group
unsigned int group_idx = d_scratch_g[i];
group_t g = d_members[group_idx];
// construct compact group representation, excluding particle pidx
group_t p;
if (has_type_mapping)
{
// last element = group type
p.idx[group_size - 1] = d_group_typeval[group_idx].type;
}
else
{
// last element = group index
p.idx[group_size - 1] = group_idx;
}
unsigned int j = 0;
// position in group
unsigned int gpos = 0;
for (unsigned int k = 0; k < group_size; ++k)
{
unsigned int tag_k = g.tag[k];
unsigned int pidx_k = d_rtag[tag_k];
if (pidx_k == pidx)
{
gpos = k;
continue;
}
p.idx[j++] = pidx_k;
}
d_pidx_group_table[offset] = p;
d_pidx_gpos_table[offset] = gpos;
}
template<unsigned int group_size, typename group_t>
void gpu_update_group_table(const unsigned int n_groups,
const unsigned int N,
const group_t* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_t* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc)
{
// construct scratch table by expanding the group table by particle index
unsigned int block_size = 256;
unsigned n_blocks = n_groups / block_size + 1;
// reset number of groups
hipMemsetAsync(d_n_groups, 0, sizeof(unsigned int) * N);
hipLaunchKernelGGL(HIP_KERNEL_NAME(gpu_count_groups_kernel<group_size>),
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups,
d_group_table,
d_rtag,
d_scratch_idx,
d_scratch_g,
d_n_groups,
max_n_groups,
d_condition,
next_flag);
// read back flag
hipMemcpy(&flag, d_condition, sizeof(unsigned int), hipMemcpyDeviceToHost);
if (!(flag >= next_flag) && n_groups)
{
// we are good, fill group table
// sort groups by particle idx
thrust::device_ptr<unsigned int> scratch_idx(d_scratch_idx);
thrust::device_ptr<unsigned int> scratch_g(d_scratch_g);
#ifdef __HIP_PLATFORM_HCC__
thrust::sort_by_key(thrust::hip::par(alloc),
#else
thrust::sort_by_key(thrust::cuda::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
scratch_g);
// perform a segmented scan of d_scratch_idx
thrust::device_ptr<unsigned int> offsets(d_offsets);
thrust::constant_iterator<unsigned int> const_it(1);
#ifdef __HIP_PLATFORM_HCC__
thrust::exclusive_scan_by_key(thrust::hip::par(alloc),
#else
thrust::exclusive_scan_by_key(thrust::cuda::par(alloc),
#endif
scratch_idx,
scratch_idx + group_size * n_groups,
const_it,
offsets);
// scatter groups to destinations
block_size = 256;
n_blocks = (group_size * n_groups) / block_size + 1;
hipLaunchKernelGGL(gpu_group_scatter_kernel<group_size>,
dim3(n_blocks),
dim3(block_size),
0,
0,
n_groups * group_size,
d_scratch_g,
d_scratch_idx,
d_offsets,
d_group_table,
d_group_typeval,
d_rtag,
d_pidx_group_table,
d_pidx_gpos_table,
pidx_group_table_pitch,
has_type_mapping);
}
}
/*
* Explicit template instantiations
*/
//! BondData
template void gpu_update_group_table<2>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<2>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<2>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! AngleData
template void gpu_update_group_table<3>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<3>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<3>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! DihedralData and ImproperData
template void gpu_update_group_table<4>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<4>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<4>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
//! MeshTriangleData
template void gpu_update_group_table<6>(const unsigned int n_groups,
const unsigned int N,
const union group_storage<6>* d_group_table,
const typeval_union* d_group_typeval,
const unsigned int* d_rtag,
unsigned int* d_n_groups,
unsigned int max_n_groups,
unsigned int* d_condition,
unsigned int next_flag,
unsigned int& flag,
group_storage<6>* d_pidx_group_table,
unsigned int* d_pidx_gpos_table,
const unsigned int pidx_group_table_pitch,
unsigned int* d_scratch_g,
unsigned int* d_scratch_idx,
unsigned int* d_offsets,
bool has_type_mapping,
CachedAllocator& alloc);
} // end namespace hoomd
|
66716a8a6fbdc407be7410a1e4b8f8cbe4c7485a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
namespace lbfgsbcuda {
namespace matupd {
__global__
void kernel0
(
int n,
real* wy,
const real* r,
const int iPitch)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
wy[i * iPitch] = r[i];
}
__global__
void kernel1
(
real* sy,
const int iPitch_i,
const int iPitch_j,
const int col
)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
__shared__ real sdata[8][8];
sdata[j][i] = sy[j * iPitch_i + i * iPitch_j];
if(i >= col - 1 || j >= col - 1 || i > j)
return;
__syncthreads();
sy[j * iPitch_i + i * iPitch_j] = sdata[j + 1][i + 1];
}
template<int bx>
__global__
void kernel20(
const int n,
const int head,
const int m,
const int col,
const int iPitch,
const int oPitch,
const real* d,
real* buf_array_p,
const real* wy)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int tid = threadIdx.x;
volatile __shared__ real sdata[bx];
real mySum;
int pointr = Modular((head + j), m);
if(i < n) {
mySum = d[i] * wy[i * iPitch + pointr];
} else {
mySum = 0;
}
sdata[tid] = mySum;
__syncthreads();
if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
if (tid < __min(bx / 2, 32))
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile real* smem = sdata + tid;
if(bx > 32) {*smem = mySum = mySum + smem[32];}
if(bx > 16) {*smem = mySum = mySum + smem[16];}
if(bx > 8) {*smem = mySum = mySum + smem[8];}
if(bx > 4) {*smem = mySum = mySum + smem[4];}
if(bx > 2) {*smem = mySum = mySum + smem[2];}
if(bx > 1) {*smem = mySum = mySum + smem[1];}
}
if (tid == 0)
buf_array_p[j * oPitch + blockIdx.x] = mySum;
}
template<int bx>
__global__
void kernel21(
const int n,
const int iPitch,
const int oPitch,
const real* buf_in,
real* buf_out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int tid = threadIdx.x;
volatile __shared__ real sdata[bx];
real mySum;
if(i < n)
mySum = buf_in[j * iPitch + i];
else
mySum = 0;
sdata[tid] = mySum;
__syncthreads();
if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
if (tid < __min(bx / 2, 32))
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile real* smem = sdata + tid;
if(bx > 32) {*smem = mySum = mySum + smem[32];}
if(bx > 16) {*smem = mySum = mySum + smem[16];}
if(bx > 8) {*smem = mySum = mySum + smem[8];}
if(bx > 4) {*smem = mySum = mySum + smem[4];}
if(bx > 2) {*smem = mySum = mySum + smem[2];}
if(bx > 1) {*smem = mySum = mySum + smem[1];}
}
if(tid == 0) {
buf_out[j * oPitch + blockIdx.x] = mySum;
}
}
void prog0(
const int& n,
const int& m,
real* wy,
real* sy,
const real* r,
const real* d,
int& itail,
const int& iupdat,
int& col,
int& head,
const real& dr,
const int& iPitch0,
const int& iPitch_i,
const int& iPitch_j,
real* buf_array_p,
const int& iPitch_normal,
hipStream_t st)
{
CheckBuffer(wy, m, n * m);
hipLaunchKernelGGL(kernel0, dim3(iDivUp(n, 512)), dim3(512), 0, st,
n, wy + itail, r, iPitch0);
CheckBuffer(wy, m, n * m);
if( iupdat > m )
{
CheckBuffer(sy, iPitch_i, col * iPitch_i);
hipLaunchKernelGGL(kernel1, dim3(1), dim3(col, col), 0, st,
sy, iPitch_i, iPitch_j, col);
}
if(col > 1) {
CheckBuffer(sy, iPitch_i, col * iPitch_i);
int nblock0 = n;
int mi = log2Up(nblock0);
int nblock1 = iDivUp2(nblock0, mi);
real* oFinal = sy + (col - 1) * iPitch_i;
real* output = (nblock1 == 1) ? oFinal : buf_array_p;
int op20 = (nblock1 == 1) ? iPitch_j : iPitch_normal;
dynamicCall(kernel20, mi, nblock1, col - 1, st, (nblock0, head, m, col, iPitch0, op20, d, output, wy));
/*
kernel20<<<dim3(nblock1, col - 1), dim3(512), 0, st>>>
(nblock0, head, m, col, iPitch0, op20, d, output, wy);*/
nblock0 = nblock1;
//Launch Ker 0
while(nblock0 > 1) {
nblock1 = iDivUp2(nblock0, mi);
real* input = output;
output = (nblock1 == 1) ? oFinal : (output + nblock0);
int op20 = (nblock1 == 1) ? iPitch_j : iPitch_normal;
dynamicCall(kernel21, mi, nblock1, col - 1, st, (nblock0, iPitch_normal, op20, input, output));
/*
kernel21<<<dim3(nblock1, col - 1), dim3(512), 0, st>>>
(nblock0, n, op20, input, output);*/
nblock0 = nblock1;
}
CheckBuffer(sy, iPitch_i, col * iPitch_i);
}
hipMemcpyAsync(sy + (col - 1) * iPitch0 + col - 1, &dr, sizeof(real), hipMemcpyHostToDevice, st);
CheckBuffer(sy, iPitch_i, col * iPitch_i);
}
};
}; | 66716a8a6fbdc407be7410a1e4b8f8cbe4c7485a.cu | /*************************************************************************
GPU Version:
Tsinghua University, Aug. 2012.
Written by Yun Fei in collaboration with
W. Wang and B. Wang
Original:
Optimization Technology Center.
Argonne National Laboratory and Northwestern University.
Written by Ciyou Zhu in collaboration with
R.H. Byrd, P. Lu-Chen and J. Nocedal.
Contributors:
* Sergey Bochkanov (ALGLIB project). Translation from FORTRAN to
pseudocode.
This software is freely available, but we expect that all publications
describing work using this software, or all commercial products using it,
quote at least one of the references given below:
* R. H. Byrd, P. Lu and J. Nocedal. A Limited Memory Algorithm for
Bound Constrained Optimization, (1995), SIAM Journal on Scientific
and Statistical Computing , 16, 5, pp. 1190-1208.
* C. Zhu, R.H. Byrd and J. Nocedal. L-BFGS-B: Algorithm 778: L-BFGS-B,
FORTRAN routines for large scale bound constrained optimization
(1997), ACM Transactions on Mathematical Software, Vol 23, Num. 4,
pp. 550 - 560.
*************************************************************************/
#include "lbfgsbcuda.h"
namespace lbfgsbcuda {
namespace matupd {
__global__
void kernel0
(
int n,
real* wy,
const real* r,
const int iPitch)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i >= n)
return;
wy[i * iPitch] = r[i];
}
__global__
void kernel1
(
real* sy,
const int iPitch_i,
const int iPitch_j,
const int col
)
{
const int i = threadIdx.x;
const int j = threadIdx.y;
__shared__ real sdata[8][8];
sdata[j][i] = sy[j * iPitch_i + i * iPitch_j];
if(i >= col - 1 || j >= col - 1 || i > j)
return;
__syncthreads();
sy[j * iPitch_i + i * iPitch_j] = sdata[j + 1][i + 1];
}
template<int bx>
__global__
void kernel20(
const int n,
const int head,
const int m,
const int col,
const int iPitch,
const int oPitch,
const real* d,
real* buf_array_p,
const real* wy)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int tid = threadIdx.x;
volatile __shared__ real sdata[bx];
real mySum;
int pointr = Modular((head + j), m);
if(i < n) {
mySum = d[i] * wy[i * iPitch + pointr];
} else {
mySum = 0;
}
sdata[tid] = mySum;
__syncthreads();
if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
if (tid < __min(bx / 2, 32))
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile real* smem = sdata + tid;
if(bx > 32) {*smem = mySum = mySum + smem[32];}
if(bx > 16) {*smem = mySum = mySum + smem[16];}
if(bx > 8) {*smem = mySum = mySum + smem[8];}
if(bx > 4) {*smem = mySum = mySum + smem[4];}
if(bx > 2) {*smem = mySum = mySum + smem[2];}
if(bx > 1) {*smem = mySum = mySum + smem[1];}
}
if (tid == 0)
buf_array_p[j * oPitch + blockIdx.x] = mySum;
}
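// Note (editorial): the volatile shared-memory tail above relies on implicit
// warp-synchronous execution; on GPUs with warp shuffle the last 32 partial sums are
// more commonly folded without shared memory, e.g.
//   for (int offset = 16; offset > 0; offset >>= 1)
//       mySum += __shfl_down_sync(0xffffffff, mySum, offset);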
template<int bx>
__global__
void kernel21(
const int n,
const int iPitch,
const int oPitch,
const real* buf_in,
real* buf_out)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y;
const int tid = threadIdx.x;
volatile __shared__ real sdata[bx];
real mySum;
if(i < n)
mySum = buf_in[j * iPitch + i];
else
mySum = 0;
sdata[tid] = mySum;
__syncthreads();
if(bx > 512) {if (tid < 512) { sdata[tid] = mySum = (mySum + sdata[tid + 512]); } __syncthreads();}
if(bx > 256) {if (tid < 256) { sdata[tid] = mySum = (mySum + sdata[tid + 256]); } __syncthreads();}
if(bx > 128) {if (tid < 128) { sdata[tid] = mySum = (mySum + sdata[tid + 128]); } __syncthreads();}
if(bx > 64) {if (tid < 64) { sdata[tid] = mySum = (mySum + sdata[tid + 64]); } __syncthreads();}
if (tid < __min(bx / 2, 32))
{
// now that we are using warp-synchronous programming (below)
// we need to declare our shared memory volatile so that the compiler
// doesn't reorder stores to it and induce incorrect behavior.
volatile real* smem = sdata + tid;
if(bx > 32) {*smem = mySum = mySum + smem[32];}
if(bx > 16) {*smem = mySum = mySum + smem[16];}
if(bx > 8) {*smem = mySum = mySum + smem[8];}
if(bx > 4) {*smem = mySum = mySum + smem[4];}
if(bx > 2) {*smem = mySum = mySum + smem[2];}
if(bx > 1) {*smem = mySum = mySum + smem[1];}
}
if(tid == 0) {
buf_out[j * oPitch + blockIdx.x] = mySum;
}
}
void prog0(
const int& n,
const int& m,
real* wy,
real* sy,
const real* r,
const real* d,
int& itail,
const int& iupdat,
int& col,
int& head,
const real& dr,
const int& iPitch0,
const int& iPitch_i,
const int& iPitch_j,
real* buf_array_p,
const int& iPitch_normal,
cudaStream_t st)
{
CheckBuffer(wy, m, n * m);
kernel0<<<dim3(iDivUp(n, 512)), dim3(512), 0, st>>>
(n, wy + itail, r, iPitch0);
CheckBuffer(wy, m, n * m);
if( iupdat > m )
{
CheckBuffer(sy, iPitch_i, col * iPitch_i);
kernel1<<<1, dim3(col, col), 0, st>>>
(sy, iPitch_i, iPitch_j, col);
}
if(col > 1) {
CheckBuffer(sy, iPitch_i, col * iPitch_i);
int nblock0 = n;
int mi = log2Up(nblock0);
int nblock1 = iDivUp2(nblock0, mi);
real* oFinal = sy + (col - 1) * iPitch_i;
real* output = (nblock1 == 1) ? oFinal : buf_array_p;
int op20 = (nblock1 == 1) ? iPitch_j : iPitch_normal;
dynamicCall(kernel20, mi, nblock1, col - 1, st, (nblock0, head, m, col, iPitch0, op20, d, output, wy));
/*
kernel20<<<dim3(nblock1, col - 1), dim3(512), 0, st>>>
(nblock0, head, m, col, iPitch0, op20, d, output, wy);*/
nblock0 = nblock1;
//Launch Ker 0
while(nblock0 > 1) {
nblock1 = iDivUp2(nblock0, mi);
real* input = output;
output = (nblock1 == 1) ? oFinal : (output + nblock0);
int op20 = (nblock1 == 1) ? iPitch_j : iPitch_normal;
dynamicCall(kernel21, mi, nblock1, col - 1, st, (nblock0, iPitch_normal, op20, input, output));
/*
kernel21<<<dim3(nblock1, col - 1), dim3(512), 0, st>>>
(nblock0, n, op20, input, output);*/
nblock0 = nblock1;
}
CheckBuffer(sy, iPitch_i, col * iPitch_i);
}
cudaMemcpyAsync(sy + (col - 1) * iPitch0 + col - 1, &dr, sizeof(real), cudaMemcpyHostToDevice, st);
CheckBuffer(sy, iPitch_i, col * iPitch_i);
}
};
}; |
3b6921e6690d9bc0d4f1a32ecd51e09e0dbc7551.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "updateExCovX.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *e_x_cov_x = NULL;
hipMalloc(&e_x_cov_x, XSIZE*YSIZE);
double gauss_d2 = 1;
int valid_voxel_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(updateExCovX, dim3(gridBlock), dim3(threadBlock), 0, 0, e_x_cov_x, gauss_d2, valid_voxel_num);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(updateExCovX, dim3(gridBlock), dim3(threadBlock), 0, 0, e_x_cov_x, gauss_d2, valid_voxel_num);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(updateExCovX, dim3(gridBlock), dim3(threadBlock), 0, 0, e_x_cov_x, gauss_d2, valid_voxel_num);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3b6921e6690d9bc0d4f1a32ecd51e09e0dbc7551.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "updateExCovX.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
double *e_x_cov_x = NULL;
cudaMalloc(&e_x_cov_x, XSIZE*YSIZE);
double gauss_d2 = 1;
int valid_voxel_num = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
updateExCovX<<<gridBlock,threadBlock>>>(e_x_cov_x,gauss_d2,valid_voxel_num);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
updateExCovX<<<gridBlock,threadBlock>>>(e_x_cov_x,gauss_d2,valid_voxel_num);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
updateExCovX<<<gridBlock,threadBlock>>>(e_x_cov_x,gauss_d2,valid_voxel_num);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
efb2f9604df3c82b05098f42c50c015ecee2dca6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void addElement(int *a,int *b,int *t)
{
int v = threadIdx.y;
int n = v*blockDim.x+threadIdx.x;
t[n] = a[n]+b[n];
}
__global__ void addCol(int *a , int *b , int *t)
{
int lp =0;
int index = threadIdx.x;
for(lp = 0 ;lp<blockDim.x;lp++)
{
t[index] = a[index]+b[index];
index += blockDim.x;
}
}
__global__ void addRow(int *a , int *b , int *t)
{
int lp =0;
int index = threadIdx.x*blockDim.x;
for(lp = 0 ;lp<blockDim.x;lp++)
{
t[index] = a[index] + b[index];
index++;
}
}
int main(void)
{
int *a,*b,*t,n,i,j;
int *d_a,*d_b,*d_t;
printf("Enter the value of n: ");
scanf("%d",&n);
int size = sizeof(int)*n*n;
a = (int*)malloc(n*n*sizeof(int));
b = (int*)malloc(n*n*sizeof(int));
t = (int*)malloc(n*n*sizeof(int));
printf("Enter input matrix 1 : \n");
for(i = 0;i<n*n;i++)
scanf("%d",&a[i]);
printf("Enter input matrix 2 : \n");
for(i = 0;i<n*n;i++)
scanf("%d",&b[i]);
hipMalloc((void**)&d_a,size);
hipMalloc((void**)&d_b,size);
hipMalloc((void**)&d_t,size);
hipMemcpy(d_a,a,size,hipMemcpyHostToDevice);
hipMemcpy(d_b,b,size,hipMemcpyHostToDevice);
printf("Enter 1 for Row \n 2 for Column \n 3 for Element \n");
int ch;
scanf("%d",&ch);
if(ch == 1)
{
dim3 block(n,1);
dim3 grid(1,1);
hipLaunchKernelGGL(( addRow), dim3(grid),dim3(block), 0, 0, d_a,d_b,d_t);
}
if(ch == 2)
{
dim3 block(n,1);
dim3 grid(1,1);
hipLaunchKernelGGL(( addCol), dim3(grid),dim3(block), 0, 0, d_a,d_b,d_t);
}
if(ch == 3)
{
dim3 block(n,n);
dim3 grid(1,1);
hipLaunchKernelGGL(( addElement), dim3(grid),dim3(block), 0, 0, d_a,d_b,d_t);
}
hipMemcpy(t,d_t,size,hipMemcpyDeviceToHost);
printf("Result vector is :\n");
for(i = 0;i<n;i++)
{
for(j = 0;j<n;j++)
printf("%d ",t[i*n+j]);
printf("\n");
}
getchar();
hipFree(d_a);
hipFree(d_b);
hipFree(d_t);
free(a); free(b); free(t);
return 0;
} | efb2f9604df3c82b05098f42c50c015ecee2dca6.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__ void addElement(int *a,int *b,int *t)
{
int v = threadIdx.y;
int n = v*blockDim.x+threadIdx.x;
t[n] = a[n]+b[n];
}
__global__ void addCol(int *a , int *b , int *t)
{
int lp =0;
int index = threadIdx.x;
for(lp = 0 ;lp<blockDim.x;lp++)
{
t[index] = a[index]+b[index];
index += blockDim.x;
}
}
__global__ void addRow(int *a , int *b , int *t)
{
int lp =0;
int index = threadIdx.x*blockDim.x;
for(lp = 0 ;lp<blockDim.x;lp++)
{
t[index] = a[index] + b[index];
index++;
}
}
int main(void)
{
int *a,*b,*t,n,i,j;
int *d_a,*d_b,*d_t;
printf("Enter the value of n: ");
scanf("%d",&n);
int size = sizeof(int)*n*n;
a = (int*)malloc(n*n*sizeof(int));
b = (int*)malloc(n*n*sizeof(int));
t = (int*)malloc(n*n*sizeof(int));
printf("Enter input matrix 1 : \n");
for(i = 0;i<n*n;i++)
scanf("%d",&a[i]);
printf("Enter input matrix 2 : \n");
for(i = 0;i<n*n;i++)
scanf("%d",&b[i]);
cudaMalloc((void**)&d_a,size);
cudaMalloc((void**)&d_b,size);
cudaMalloc((void**)&d_t,size);
cudaMemcpy(d_a,a,size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,b,size,cudaMemcpyHostToDevice);
printf("Enter 1 for Row \n 2 for Column \n 3 for Element \n");
int ch;
scanf("%d",&ch);
if(ch == 1)
{
dim3 block(n,1);
dim3 grid(1,1);
addRow<<<grid,block>>>(d_a,d_b,d_t);
}
if(ch == 2)
{
dim3 block(n,1);
dim3 grid(1,1);
addCol<<<grid,block>>>(d_a,d_b,d_t);
}
if(ch == 3)
{
dim3 block(n,n);
dim3 grid(1,1);
addElement<<<grid,block>>>(d_a,d_b,d_t);
}
cudaMemcpy(t,d_t,size,cudaMemcpyDeviceToHost);
printf("Result vector is :\n");
for(i = 0;i<n;i++)
{
for(j = 0;j<n;j++)
printf("%d ",t[i*n+j]);
printf("\n");
}
getchar();
cudaFree(d_a);
cudaFree(d_b);
cudaFree(d_t);
free(a); free(b); free(t);
return 0;
} |
476411f068bf65034e9f5556961f81edaa6499fd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C" {
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input);
}
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input) {
const int bj = blockIdx.x;
const int wtj = threadIdx.y;
const int ttj = threadIdx.x;
const int nrThreadsW = min(1024, w);
const int nrThreadsNrThreadsW = min(32, nrThreadsW);
const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
if (tj < nrThreadsW) {
const int j = bj * (1 * nrThreadsW) + tj;
if (j < w) {
float sumEven = 0.0;
float sumOdd = 0.0;
for (int i = 0; i < h - 1; i += 2) {
sumEven += input[j + i * (1 * w)];
sumOdd += input[j + (i + 1) * (1 * w)];
}
const float meanEven = sumEven / ((h + 1) / 2);
const float meanOdd = sumOdd / (h / 2);
for (int i = 0; i < h - 1; i += 2) {
output[j + i * (1 * w)] = input[j + i * (1 * w)] - meanEven;
output[j + (i + 1) * (1 * w)] = input[j + (i + 1) * (1 * w)] - meanOdd;
}
}
}
}
| 476411f068bf65034e9f5556961f81edaa6499fd.cu | // fermi
/*
* Copyright 2018 Vrije Universiteit Amsterdam, The Netherlands
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
extern "C" {
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input);
}
__global__ void zeromeanVerticallyKernel(const int h, const int w, float* output, const float* input) {
const int bj = blockIdx.x;
const int wtj = threadIdx.y;
const int ttj = threadIdx.x;
const int nrThreadsW = min(1024, w);
const int nrThreadsNrThreadsW = min(32, nrThreadsW);
const int tj = wtj * (1 * nrThreadsNrThreadsW) + ttj;
if (tj < nrThreadsW) {
const int j = bj * (1 * nrThreadsW) + tj;
if (j < w) {
float sumEven = 0.0;
float sumOdd = 0.0;
for (int i = 0; i < h - 1; i += 2) {
sumEven += input[j + i * (1 * w)];
sumOdd += input[j + (i + 1) * (1 * w)];
}
const float meanEven = sumEven / ((h + 1) / 2);
const float meanOdd = sumOdd / (h / 2);
for (int i = 0; i < h - 1; i += 2) {
output[j + i * (1 * w)] = input[j + i * (1 * w)] - meanEven;
output[j + (i + 1) * (1 * w)] = input[j + (i + 1) * (1 * w)] - meanOdd;
}
}
}
}
|
55685d3276288155ea886fcd382cc63d90fd6cc6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/******************************************************************************/
/*
GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/***************************************************************************//**
Purpose
-------
ZLASET_BAND initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = ALPHA, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
and A(i,i) = BETA, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET in.
@ingroup magma_laset_band
*******************************************************************************/
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) );
hipLaunchKernelGGL(zlaset_band_upper, dim3(grid), dim3(threads), 0, queue->cuda_stream(), m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( magma_ceildiv( min(m,n), NB ) );
hipLaunchKernelGGL(zlaset_band_lower, dim3(grid), dim3(threads), 0, queue->cuda_stream(), m, n, offdiag, diag, dA, ldda);
}
}
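// Usage sketch (editorial, not part of MAGMA; values are illustrative): set the main
// diagonal of an m-by-n device matrix to 1 and its two superdiagonals to 0.5.
//   magmaDoubleComplex diag = MAGMA_Z_MAKE( 1.0, 0.0 );
//   magmaDoubleComplex offdiag = MAGMA_Z_MAKE( 0.5, 0.0 );
//   magmablas_zlaset_band( MagmaUpper, m, n, 3, offdiag, diag, dA, ldda, queue );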
| 55685d3276288155ea886fcd382cc63d90fd6cc6.cu | /*
-- MAGMA (version 2.4.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date June 2018
@author Raffaele Solca
@author Mark Gates
@precisions normal z -> s d c
*/
#include "magma_internal.h"
#define NB 64
/******************************************************************************/
/*
GPU kernel for setting the k-1 super-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil((m+k-1)/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread k-1 does the main diagonal, thread k-2 the first super-diagonal, etc.
block 0 block 1
0 => skip above matrix
1 0 => skip above matrix
2 1 0 => skip above matrix
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ | 3 2 1 0 ]
[ | 3 2 1 ]
| 3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=10, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_upper(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x - k + 1;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == k-1)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j >= 0 && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/******************************************************************************/
/*
GPU kernel for setting the k-1 sub-diagonals to OFFDIAG
and the main diagonal to DIAG.
Divides matrix into min( ceil(m/nb), ceil(n/nb) ) block-columns,
with k threads in each block.
Each thread iterates across one diagonal.
Thread 0 does the main diagonal, thread 1 the first sub-diagonal, etc.
block 0 block 1
[ 0 | ]
[ 1 0 | ]
[ 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 0 | ]
[ 3 2 1 | 0 ]
[ 3 2 | 1 0 ]
[ 3 | 2 1 0 ]
[ 3 2 1 0 ]
[ 3 2 1 ]
3 2 => skip below matrix
3 => skip below matrix
Thread assignment for m=13, n=12, k=4, nb=8. Each column is done in parallel.
*/
__global__
void zlaset_band_lower(
int m, int n,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex *A, int lda)
{
//int k = blockDim.x;
int ibx = blockIdx.x * NB;
int ind = ibx + threadIdx.x;
A += ind + ibx*lda;
magmaDoubleComplex value = offdiag;
if (threadIdx.x == 0)
value = diag;
#pragma unroll
for (int j=0; j < NB; j++) {
if (ibx + j < n && ind + j < m) {
A[j*(lda+1)] = value;
}
}
}
/***************************************************************************//**
Purpose
-------
ZLASET_BAND initializes the main diagonal of dA to DIAG,
and the K-1 sub- or super-diagonals to OFFDIAG.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be set.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
k INTEGER
The number of diagonals to set, including the main diagonal. K >= 0.
Currently, K <= 1024 due to CUDA restrictions (max. number of threads per block).
@param[in]
offdiag COMPLEX_16
Off-diagonal elements in the band are set to OFFDIAG.
@param[in]
diag COMPLEX_16
All the main diagonal elements are set to DIAG.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
On exit, A(i,j) = OFFDIAG, 1 <= i <= m, 1 <= j <= n where i != j, abs(i-j) < k;
and A(i,i) = DIAG, 1 <= i <= min(m,n)
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[in]
queue magma_queue_t
Stream to execute ZLASET_BAND in.
@ingroup magma_laset_band
*******************************************************************************/
extern "C" void
magmablas_zlaset_band(
magma_uplo_t uplo, magma_int_t m, magma_int_t n, magma_int_t k,
magmaDoubleComplex offdiag, magmaDoubleComplex diag,
magmaDoubleComplex_ptr dA, magma_int_t ldda,
magma_queue_t queue)
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( k < 0 || k > 1024 )
info = -4;
else if ( ldda < max(1,m) )
info = -6;
if (info != 0) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if (uplo == MagmaUpper) {
dim3 threads( min(k,n) );
dim3 grid( magma_ceildiv( min(m+k-1,n), NB ) );
zlaset_band_upper<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda);
}
else if (uplo == MagmaLower) {
dim3 threads( min(k,m) );
dim3 grid( magma_ceildiv( min(m,n), NB ) );
zlaset_band_lower<<< grid, threads, 0, queue->cuda_stream() >>> (m, n, offdiag, diag, dA, ldda);
}
}
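/*
    Hypothetical usage sketch -- not part of the MAGMA source above.  The
    function name is made up; it only illustrates the argument order documented
    in the header comment, assumes dA, ldda and queue are already set up by the
    caller, and uses the standard magma_types.h constants MAGMA_Z_ZERO and
    MAGMA_Z_ONE.
*/
static void
example_zlaset_band_call(
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex_ptr dA, magma_int_t ldda,
    magma_queue_t queue )
{
    // k = 3: touch the main diagonal plus the two super-diagonals above it;
    // the off-diagonals become 0 and the main diagonal becomes 1
    magmablas_zlaset_band( MagmaUpper, m, n, 3,
                           MAGMA_Z_ZERO, MAGMA_Z_ONE,
                           dA, ldda, queue );
}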
|
7c51da3c0b3238b3b14f3b535ba9e548327ace88.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers_hip.cuh"
namespace xgboost {
// the handler to call instead of hipSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
auto s_data = dh::ToSpan(*data_d_);
dh::LaunchN(data_d_->size(),
[=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(hipMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
hipMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
if (device_ >= 0 && device >= 0) {
CHECK_EQ(device_, device) << "New device ordinal is different from previous one.";
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(hipMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
hipMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
hipMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(hipMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), hipMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(hipSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<double>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<GradientPairPrecise>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
template class HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>;
template class HostDeviceVector<RTreeNodeStat>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
| 7c51da3c0b3238b3b14f3b535ba9e548327ace88.cu | /**
* Copyright 2017-2023 by XGBoost contributors
*/
#include <thrust/fill.h>
#include <thrust/device_ptr.h>
#include <algorithm>
#include <cstdint>
#include <mutex>
#include "xgboost/data.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/tree_model.h"
#include "device_helpers.cuh"
namespace xgboost {
// the handler to call instead of cudaSetDevice; only used for testing
static void (*cudaSetDeviceHandler)(int) = nullptr; // NOLINT
void SetCudaSetDeviceHandler(void (*handler)(int)) {
cudaSetDeviceHandler = handler;
}
template <typename T>
class HostDeviceVectorImpl {
public:
HostDeviceVectorImpl(size_t size, T v, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(size, v);
} else {
data_h_.resize(size, v);
}
}
// Initializer can be std::vector<T> or std::initializer_list<T>
template <class Initializer>
HostDeviceVectorImpl(const Initializer& init, int device) : device_(device) {
if (device >= 0) {
gpu_access_ = GPUAccess::kWrite;
LazyResizeDevice(init.size());
Copy(init);
} else {
data_h_ = init;
}
}
HostDeviceVectorImpl(HostDeviceVectorImpl<T>&& that) :
device_{that.device_},
data_h_{std::move(that.data_h_)},
data_d_{std::move(that.data_d_)},
gpu_access_{that.gpu_access_} {}
~HostDeviceVectorImpl() {
if (device_ >= 0) {
SetDevice();
}
}
size_t Size() const {
return HostCanRead() ? data_h_.size() : data_d_ ? data_d_->size() : 0;
}
int DeviceIdx() const { return device_; }
T* DevicePointer() {
LazySyncDevice(GPUAccess::kWrite);
return data_d_->data().get();
}
const T* ConstDevicePointer() {
LazySyncDevice(GPUAccess::kRead);
return data_d_->data().get();
}
common::Span<T> DeviceSpan() {
LazySyncDevice(GPUAccess::kWrite);
return {data_d_->data().get(), Size()};
}
common::Span<const T> ConstDeviceSpan() {
LazySyncDevice(GPUAccess::kRead);
return {data_d_->data().get(), Size()};
}
void Fill(T v) { // NOLINT
if (HostCanWrite()) {
std::fill(data_h_.begin(), data_h_.end(), v);
} else {
gpu_access_ = GPUAccess::kWrite;
SetDevice();
auto s_data = dh::ToSpan(*data_d_);
dh::LaunchN(data_d_->size(),
[=] XGBOOST_DEVICE(size_t i) { s_data[i] = v; });
}
}
void Copy(HostDeviceVectorImpl<T>* other) {
CHECK_EQ(Size(), other->Size());
SetDevice(other->device_);
// Data is on host.
if (HostCanWrite() && other->HostCanWrite()) {
std::copy(other->data_h_.begin(), other->data_h_.end(), data_h_.begin());
return;
}
SetDevice();
CopyToDevice(other);
}
void Copy(const std::vector<T>& other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.data());
}
}
void Copy(std::initializer_list<T> other) {
CHECK_EQ(Size(), other.size());
if (HostCanWrite()) {
std::copy(other.begin(), other.end(), data_h_.begin());
} else {
CopyToDevice(other.begin());
}
}
void Extend(HostDeviceVectorImpl* other) {
auto ori_size = this->Size();
this->Resize(ori_size + other->Size(), T());
if (HostCanWrite() && other->HostCanRead()) {
auto& h_vec = this->HostVector();
auto& other_vec = other->HostVector();
CHECK_EQ(h_vec.size(), ori_size + other->Size());
std::copy(other_vec.cbegin(), other_vec.cend(), h_vec.begin() + ori_size);
} else {
auto ptr = other->ConstDevicePointer();
SetDevice();
CHECK_EQ(this->DeviceIdx(), other->DeviceIdx());
dh::safe_cuda(cudaMemcpyAsync(this->DevicePointer() + ori_size,
ptr,
other->Size() * sizeof(T),
cudaMemcpyDeviceToDevice));
}
}
std::vector<T>& HostVector() {
LazySyncHost(GPUAccess::kNone);
return data_h_;
}
const std::vector<T>& ConstHostVector() {
LazySyncHost(GPUAccess::kRead);
return data_h_;
}
void SetDevice(int device) {
if (device_ == device) { return; }
if (device_ >= 0) {
LazySyncHost(GPUAccess::kNone);
}
if (device_ >= 0 && device >= 0) {
CHECK_EQ(device_, device) << "New device ordinal is different from previous one.";
}
device_ = device;
if (device_ >= 0) {
LazyResizeDevice(data_h_.size());
}
}
void Resize(size_t new_size, T v) {
if (new_size == Size()) { return; }
if ((Size() == 0 && device_ >= 0) || (DeviceCanWrite() && device_ >= 0)) {
// fast on-device resize
gpu_access_ = GPUAccess::kWrite;
SetDevice();
data_d_->resize(new_size, v);
} else {
// resize on host
LazySyncHost(GPUAccess::kNone);
data_h_.resize(new_size, v);
}
}
void LazySyncHost(GPUAccess access) {
if (HostCanAccess(access)) { return; }
if (HostCanRead()) {
// data is present, just need to deny access to the device
gpu_access_ = access;
return;
}
gpu_access_ = access;
if (data_h_.size() != data_d_->size()) { data_h_.resize(data_d_->size()); }
SetDevice();
dh::safe_cuda(cudaMemcpy(data_h_.data(),
data_d_->data().get(),
data_d_->size() * sizeof(T),
cudaMemcpyDeviceToHost));
}
void LazySyncDevice(GPUAccess access) {
if (DeviceCanAccess(access)) { return; }
if (DeviceCanRead()) {
// deny read to the host
gpu_access_ = access;
return;
}
// data is on the host
LazyResizeDevice(data_h_.size());
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(),
data_h_.data(),
data_d_->size() * sizeof(T),
cudaMemcpyHostToDevice));
gpu_access_ = access;
}
bool HostCanAccess(GPUAccess access) const { return gpu_access_ <= access; }
bool HostCanRead() const { return HostCanAccess(GPUAccess::kRead); }
bool HostCanWrite() const { return HostCanAccess(GPUAccess::kNone); }
bool DeviceCanAccess(GPUAccess access) const { return gpu_access_ >= access; }
bool DeviceCanRead() const { return DeviceCanAccess(GPUAccess::kRead); }
bool DeviceCanWrite() const { return DeviceCanAccess(GPUAccess::kWrite); }
GPUAccess Access() const { return gpu_access_; }
private:
int device_{-1};
std::vector<T> data_h_{};
std::unique_ptr<dh::device_vector<T>> data_d_{};
GPUAccess gpu_access_{GPUAccess::kNone};
void CopyToDevice(HostDeviceVectorImpl* other) {
if (other->HostCanWrite()) {
CopyToDevice(other->data_h_.data());
} else {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), other->data_d_->data().get(),
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
}
void CopyToDevice(const T* begin) {
LazyResizeDevice(Size());
gpu_access_ = GPUAccess::kWrite;
SetDevice();
dh::safe_cuda(cudaMemcpyAsync(data_d_->data().get(), begin,
data_d_->size() * sizeof(T), cudaMemcpyDefault));
}
void LazyResizeDevice(size_t new_size) {
if (data_d_ && new_size == data_d_->size()) { return; }
SetDevice();
data_d_->resize(new_size);
}
void SetDevice() {
CHECK_GE(device_, 0);
if (cudaSetDeviceHandler == nullptr) {
dh::safe_cuda(cudaSetDevice(device_));
} else {
(*cudaSetDeviceHandler)(device_);
}
if (!data_d_) {
data_d_.reset(new dh::device_vector<T>);
}
}
};
template<typename T>
HostDeviceVector<T>::HostDeviceVector(size_t size, T v, int device)
: impl_(new HostDeviceVectorImpl<T>(size, v, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(std::initializer_list<T> init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(const std::vector<T>& init, int device)
: impl_(new HostDeviceVectorImpl<T>(init, device)) {}
template <typename T>
HostDeviceVector<T>::HostDeviceVector(HostDeviceVector<T>&& other)
: impl_(new HostDeviceVectorImpl<T>(std::move(*other.impl_))) {}
template <typename T>
HostDeviceVector<T>& HostDeviceVector<T>::operator=(HostDeviceVector<T>&& other) {
if (this == &other) { return *this; }
std::unique_ptr<HostDeviceVectorImpl<T>> new_impl(
new HostDeviceVectorImpl<T>(std::move(*other.impl_)));
delete impl_;
impl_ = new_impl.release();
return *this;
}
template <typename T>
HostDeviceVector<T>::~HostDeviceVector() {
delete impl_;
impl_ = nullptr;
}
template <typename T>
size_t HostDeviceVector<T>::Size() const { return impl_->Size(); }
template <typename T>
int HostDeviceVector<T>::DeviceIdx() const { return impl_->DeviceIdx(); }
template <typename T>
T* HostDeviceVector<T>::DevicePointer() {
return impl_->DevicePointer();
}
template <typename T>
const T* HostDeviceVector<T>::ConstDevicePointer() const {
return impl_->ConstDevicePointer();
}
template <typename T>
common::Span<T> HostDeviceVector<T>::DeviceSpan() {
return impl_->DeviceSpan();
}
template <typename T>
common::Span<const T> HostDeviceVector<T>::ConstDeviceSpan() const {
return impl_->ConstDeviceSpan();
}
template <typename T>
void HostDeviceVector<T>::Fill(T v) {
impl_->Fill(v);
}
template <typename T>
void HostDeviceVector<T>::Copy(const HostDeviceVector<T>& other) {
impl_->Copy(other.impl_);
}
template <typename T>
void HostDeviceVector<T>::Copy(const std::vector<T>& other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Copy(std::initializer_list<T> other) {
impl_->Copy(other);
}
template <typename T>
void HostDeviceVector<T>::Extend(HostDeviceVector const& other) {
impl_->Extend(other.impl_);
}
template <typename T>
std::vector<T>& HostDeviceVector<T>::HostVector() { return impl_->HostVector(); }
template <typename T>
const std::vector<T>& HostDeviceVector<T>::ConstHostVector() const {
return impl_->ConstHostVector();
}
template <typename T>
bool HostDeviceVector<T>::HostCanRead() const {
return impl_->HostCanRead();
}
template <typename T>
bool HostDeviceVector<T>::HostCanWrite() const {
return impl_->HostCanWrite();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanRead() const {
return impl_->DeviceCanRead();
}
template <typename T>
bool HostDeviceVector<T>::DeviceCanWrite() const {
return impl_->DeviceCanWrite();
}
template <typename T>
GPUAccess HostDeviceVector<T>::DeviceAccess() const {
return impl_->Access();
}
template <typename T>
void HostDeviceVector<T>::SetDevice(int device) const {
impl_->SetDevice(device);
}
template <typename T>
void HostDeviceVector<T>::Resize(size_t new_size, T v) {
impl_->Resize(new_size, v);
}
// explicit instantiations are required, as HostDeviceVector isn't header-only
template class HostDeviceVector<bst_float>;
template class HostDeviceVector<double>;
template class HostDeviceVector<GradientPair>;
template class HostDeviceVector<GradientPairPrecise>;
template class HostDeviceVector<int32_t>; // bst_node_t
template class HostDeviceVector<uint8_t>;
template class HostDeviceVector<FeatureType>;
template class HostDeviceVector<Entry>;
template class HostDeviceVector<uint64_t>; // bst_row_t
template class HostDeviceVector<uint32_t>; // bst_feature_t
template class HostDeviceVector<RegTree::Node>;
template class HostDeviceVector<RegTree::CategoricalSplitMatrix::Segment>;
template class HostDeviceVector<RTreeNodeStat>;
#if defined(__APPLE__)
/*
* On OSX:
*
* typedef unsigned int uint32_t;
* typedef unsigned long long uint64_t;
* typedef unsigned long __darwin_size_t;
*/
template class HostDeviceVector<std::size_t>;
#endif // defined(__APPLE__)
} // namespace xgboost
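// Hypothetical usage sketch -- not part of the XGBoost source above.  It only
// illustrates the lazy host/device synchronization implemented by
// HostDeviceVectorImpl; the function name and the device ordinal 0 are
// assumptions.
namespace xgboost {
inline void ExampleHostDeviceVectorUsage() {
  HostDeviceVector<float> vec(16, 1.0f, /*device=*/0);  // data lives on device 0
  vec.Fill(2.0f);                         // device write; no host copy exists yet
  const auto& h = vec.ConstHostVector();  // first host read triggers a D2H copy
  (void)h;
  vec.DevicePointer();                    // write access marks the host copy stale again
}
}  // namespace xgboost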
|
10c5a84b083150e532358d4e5d0e73a29937dff0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void extractFEAT(const double *Params, const int *st, const int *id, const int *counter, const float *dout, const int *iList, const float *mu, float *d_feat){
int t, tidx, tidy,Nblocks,NthreadsX,idF, bid, NT, ind, tcurr, Nnearest;
float rMax, Ci, Cf, lam;
tidx = threadIdx.x;
tidy = threadIdx.y;
bid = blockIdx.x;
NT = (int) Params[0];
Nnearest = (int) Params[5];
NthreadsX = blockDim.x;
Nblocks = gridDim.x;
lam = (float) Params[7];
// each thread y does a nearby filter
// each thread x combines with blocks to go through all new spikes
ind = counter[1]+tidx + NthreadsX * bid;
while(ind<counter[0]){
tcurr = st[ind];
rMax = 0.0f;
idF = iList[tidy + Nnearest * id[ind]];
for (t=-3;t<3;t++){
Ci = dout[tcurr +t+ idF * NT] + lam/mu[idF];
Cf = Ci / sqrt(lam/(mu[idF] * mu[idF]) + 1.0f);
rMax = max(rMax, Cf);
}
d_feat[tidy + ind * Nnearest] = rMax;
ind += NthreadsX * Nblocks;
}
} | 10c5a84b083150e532358d4e5d0e73a29937dff0.cu | #include "includes.h"
const int Nthreads = 1024, maxFR = 100000, NrankMax = 3, nmaxiter = 500, NchanMax = 32;
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
// THIS UPDATE DOES NOT UPDATE ELOSS?
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////
__global__ void extractFEAT(const double *Params, const int *st, const int *id, const int *counter, const float *dout, const int *iList, const float *mu, float *d_feat){
int t, tidx, tidy,Nblocks,NthreadsX,idF, bid, NT, ind, tcurr, Nnearest;
float rMax, Ci, Cf, lam;
tidx = threadIdx.x;
tidy = threadIdx.y;
bid = blockIdx.x;
NT = (int) Params[0];
Nnearest = (int) Params[5];
NthreadsX = blockDim.x;
Nblocks = gridDim.x;
lam = (float) Params[7];
// each thread y does a nearby filter
// each thread x combines with blocks to go through all new spikes
ind = counter[1]+tidx + NthreadsX * bid;
while(ind<counter[0]){
tcurr = st[ind];
rMax = 0.0f;
idF = iList[tidy + Nnearest * id[ind]];
for (t=-3;t<3;t++){
Ci = dout[tcurr +t+ idF * NT] + lam/mu[idF];
Cf = Ci / sqrt(lam/(mu[idF] * mu[idF]) + 1.0f);
rMax = max(rMax, Cf);
}
d_feat[tidy + ind * Nnearest] = rMax;
ind += NthreadsX * Nblocks;
}
} |
1d317755736cd9595f18b88c10b03b0697ba8b69.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <iostream>
#include <hip/hip_runtime_api.h>
//#include <cutil.h>
#include <hip/hip_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r114;\n\t"
"mov.f32 %r114, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
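/* Example invocation (hypothetical numbers; main() below checks argc != 6,
   i.e. exactly five arguments):
       ./binary 64 256 1000 32 1
   64 blocks, 256 threads per block, 1000 chase iterations, all 32 threads of
   each warp active (the divergence argument), stride 1. */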
void parametric_measure_shared(int N, int iterations, int stride) {
hipProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
hipError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
hipMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
hipMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
hipMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 1 is %s\n", hipGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
hipMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, hipMemcpyHostToDevice);
hipMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyHostToDevice);
hipDeviceSynchronize ();
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 2 is %s\n", hipGetErrorString(error_id));
}
hipLaunchKernelGGL(( init_memory) , dim3(1), dim3(1), 0, 0, d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
hipEvent_t start, stop;
float time;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipFuncSetCacheConfig(shared_latency, hipFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
hipLaunchKernelGGL(( shared_latency) , dim3(num_blocks), dim3(num_threads_per_block), 0, 0, d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
hipDeviceSynchronize();
///hipDeviceSynchronize ();
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
error_id = hipGetLastError();
if (error_id != hipSuccess) {
printf("Error 3 is %s\n", hipGetErrorString(error_id));
}
/* copy results from GPU to CPU */
hipMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, hipMemcpyDeviceToHost);
hipMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, hipMemcpyDeviceToHost);
hipDeviceSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
hipFree(d_a);
hipFree(d_ptr_a);
hipFree(duration);
hipDeviceSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
| 1d317755736cd9595f18b88c10b03b0697ba8b69.cu | #include <stdio.h>
#include <iostream>
#include <cuda_profiler_api.h>
//#include <cutil.h>
#include <cuda_runtime.h>
#define SHARED_MEM_ELEMENTS 1024
#define GLOBAL_MEM_ELEMENTS 4096
int num_blocks;
int num_threads_per_block;
int num_iterations;
int divergence;
float* h_A;
float* h_B;
float* h_C;
float* h_res;
float* d_A;
float* d_B;
float* d_C;
float* d_res;
__global__ void init_memory (unsigned long long ** my_ptr_array, unsigned long long * my_array, int stride, int num_blocks_k, int num_threads_per_block_k) {
int block_id;
int warp_id;
int i;
int index;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
void **ptr_array = (void **)my_ptr_array;
unsigned long long *array = (unsigned long long *)my_array;
if (tid == 0) {
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
//int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
// for (block_id = 0; block_id < num_blocks_k; block_id++) {
for (warp_id = 0; warp_id < num_warps_per_block; warp_id++) {
for (i = 0; i < elements_per_warp; i++) {
//index = (block_id * elements_per_block) + (warp_id * elements_per_warp);
index = (warp_id * elements_per_warp);
ptr_array[index + i] = (void*)&array[(index + ((i + 16) % elements_per_warp))];
}
}
/* for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
ptr_array[i] = (void*)&array[(i + 32)%GLOBAL_MEM_ELEMENTS];
}
*/
for (i = 0; i < GLOBAL_MEM_ELEMENTS; i++) {
//array[i] = (unsigned long long)ptr_array[(i+stride)%GLOBAL_MEM_ELEMENTS];
array[i] = (unsigned long long)ptr_array[i];
}
}
__syncthreads();
}
__global__ void shared_latency (unsigned long long ** my_ptr_array, unsigned long long * my_array, int array_length, int iterations, unsigned long long * duration, int stride, int divergence, int num_blocks_k, int num_threads_per_block_k) {
// unsigned long long int start_time, end_time;
unsigned long long int sum_time = 0;
int i, k;
int tid = blockDim.x * blockIdx.x + threadIdx.x;
int block_id = blockIdx.x;
int warp_id = threadIdx.x / 32;
int warp_thread_id = threadIdx.x % 32;
// int elements_per_block = GLOBAL_MEM_ELEMENTS / num_blocks_k;
int num_warps_per_block = num_threads_per_block_k / 32;
// int elements_per_warp = elements_per_block / num_warps_per_block;
int elements_per_warp = GLOBAL_MEM_ELEMENTS / num_warps_per_block;
//int index1 = (block_id * elements_per_block) + (warp_id * elements_per_warp) + warp_thread_id;
int index1 = (warp_id * elements_per_warp) + warp_thread_id;
void **ptr_array = (void **)my_ptr_array;
unsigned long long int *array = (unsigned long long int *)my_array;
void **tmp_ptr;
//tmp_ptr = (void *)sdata;
//tmp_ptr = (void **)(&(ptr_array[(threadIdx.x * stride)%GLOBAL_MEM_ELEMENTS]));
//tmp_ptr = (void **)(&(ptr_array[(tid * stride)%GLOBAL_MEM_ELEMENTS]));
tmp_ptr = (void **)(&(ptr_array[index1]));
double f1, f2, f3;
f1 = 1.1;
f2 = 2.5;
if (warp_thread_id < divergence) {
/* __asm volatile (
".reg .f32 %r114;\n\t"
"mov.f32 %r114, 2.2;\n\t"
);
*/
for (int l = 0; l < iterations; l++) {
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
f1 = f1 + (unsigned long long)(*tmp_ptr);
tmp_ptr = (void**)(*tmp_ptr);
}
}
// __syncthreads();
// if ((blockDim.x * blockIdx.x + threadIdx.x) == 0)
duration[tid] = (unsigned long long)(*tmp_ptr) + (f1 * tid);
// __syncthreads();
}
void usage() {
std::cout << "Usage ./binary <num_blocks> <num_threads_per_block> <iterations>" "threads active per warp" << std::endl;
}
void parametric_measure_shared(int N, int iterations, int stride) {
cudaProfilerStop();
int i;
unsigned long long int * h_a;
unsigned long long int * d_a;
unsigned long long ** h_ptr_a;
unsigned long long ** d_ptr_a;
unsigned long long * duration;
unsigned long long * latency;
cudaError_t error_id;
/* allocate array on CPU */
h_a = (unsigned long long *)malloc(sizeof(unsigned long long int) * N);
h_ptr_a = (unsigned long long **)malloc(sizeof(unsigned long long int*)*N);
latency = (unsigned long long *)malloc(sizeof(unsigned long long) * num_threads_per_block * num_blocks);
/* initialize array elements on CPU */
for (i = 0; i < N; i++) {
h_ptr_a[i] = (unsigned long long *)&h_a[i];
}
for (i = 0; i < N; i++) {
h_a[i] = (unsigned long long)h_ptr_a[(i + 1 + stride) % N];
}
/* allocate arrays on GPU */
cudaMalloc ((void **) &d_a, sizeof(unsigned long long int) * N );
cudaMalloc ((void **) &d_ptr_a, sizeof(unsigned long long int*) * N );
cudaMalloc ((void **) &duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 1 is %s\n", cudaGetErrorString(error_id));
}
/* copy array elements from CPU to GPU */
cudaMemcpy((void *)d_a, (void *)h_a, sizeof(unsigned long long int) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)d_ptr_a, (void *)h_ptr_a, sizeof(unsigned long long int *) * N, cudaMemcpyHostToDevice);
cudaMemcpy((void *)duration, (void *)latency, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyHostToDevice);
cudaThreadSynchronize ();
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 2 is %s\n", cudaGetErrorString(error_id));
}
init_memory <<<1, 1>>>(d_ptr_a, d_a, stride, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
/* launch kernel*/
//dim3 Db = dim3(13);
//dim3 Dg = dim3(768,1,1);
//printf("Launch kernel with parameters: %d, N: %d, stride: %d\n", iterations, N, stride);
// int sharedMemSize = sizeof(unsigned long long int) * N ;
cudaEvent_t start, stop;
float time;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
cudaFuncSetCacheConfig(shared_latency, cudaFuncCachePreferL1);
//shared_latency <<<Dg, Db, sharedMemSize>>>(d_a, N, iterations, duration);
//shared_latency <<<num_blocks, num_threads_per_block, sharedMemSize>>>(d_a, N, num_iterations, duration, stride, divergence);
shared_latency <<<num_blocks, num_threads_per_block>>>(d_ptr_a, d_a, N, num_iterations, duration, stride, divergence, num_blocks, num_threads_per_block);
cudaDeviceSynchronize();
///cudaThreadSynchronize ();
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
error_id = cudaGetLastError();
if (error_id != cudaSuccess) {
printf("Error 3 is %s\n", cudaGetErrorString(error_id));
}
/* copy results from GPU to CPU */
cudaMemcpy((void *)h_a, (void *)d_a, sizeof(unsigned long long int) * N, cudaMemcpyDeviceToHost);
cudaMemcpy((void *)latency, (void *)duration, sizeof(unsigned long long) * num_threads_per_block * num_blocks, cudaMemcpyDeviceToHost);
cudaThreadSynchronize ();
/* print results*/
unsigned long long max_dur = latency[0];
unsigned long long min_dur = latency[0];
unsigned long long avg_lat = latency[0];
for (int i = 1; i < num_threads_per_block * num_blocks; i++) {
avg_lat += latency[i];
if (latency[i] > max_dur) {
max_dur = latency[i];
} else if (latency[i] < min_dur) {
min_dur = latency[i];
}
}
// printf(" %d, %f, %f, %f, %f\n",stride,(double)(avg_lat/(num_threads_per_block * num_blocks * 256.0 *num_iterations)), (double)(min_dur/(256.0 * num_iterations)), (double)(max_dur/(256.0 * num_iterations)), time);
printf("%f\n", time);
/* free memory on GPU */
cudaFree(d_a);
cudaFree(d_ptr_a);
cudaFree(duration);
cudaThreadSynchronize ();
/*free memory on CPU */
free(h_a);
free(h_ptr_a);
free(latency);
}
int main(int argc, char **argv)
{
int N;
if (argc != 6) {
usage();
exit(1);
}
num_blocks = atoi(argv[1]);
num_threads_per_block = atoi(argv[2]);
num_iterations = atoi(argv[3]);
divergence = atoi(argv[4]);
int stride = atoi(argv[5]);
N = GLOBAL_MEM_ELEMENTS;
parametric_measure_shared(N, 10, stride);
return 0;
}
|
a0ccb7c9cebb8d3d69746174308aebae7ff3dda0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Points to Voxels & Voxels to Points (Modified from SparseConv)
Written by Li Jiang
*/
#include "voxelize.h"
template <typename T>
__global__ void voxelize_fp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&out[plane], multiplier * inp[plane]);
}
}
}
}
// input: feats N * C
// input: rules M * (1 + maxActive)
// output: output_feats M * C
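// e.g. (illustrative only): with maxActive = 4, a rules row {3, 10, 42, 57, 0} makes the
// corresponding output voxel pool the features of points 10, 42 and 57 (r[0] = nActive = 3,
// trailing slot unused), averaged when `average` is true and summed otherwise.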
template <typename T>
void voxelize_fp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
hipLaunchKernelGGL(( voxelize_fp_cuda_<T>), dim3(::min(nOutputRows, (Int)32768)), dim3(::min(nPlanes, (Int)32)), 0, 0, nOutputRows, maxActive, nPlanes, feats, output_feats, rules, average);
}
template <typename T>
__global__ void voxelize_bp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = d_output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = d_feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&inp[plane], multiplier * out[plane]);
}
}
}
}
template <typename T>
void voxelize_bp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
hipLaunchKernelGGL(( voxelize_bp_cuda_<T>), dim3(::min(nOutputRows, (Int)32768)), dim3(::min(nPlanes, (Int)32)), 0, 0, nOutputRows, maxActive, nPlanes, d_output_feats, d_feats, rules, average);
}
| a0ccb7c9cebb8d3d69746174308aebae7ff3dda0.cu | /*
Points to Voxels & Voxels to Points (Modified from SparseConv)
Written by Li Jiang
*/
#include "voxelize.h"
template <typename T>
__global__ void voxelize_fp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&out[plane], multiplier * inp[plane]);
}
}
}
}
// input: feats N * C
// input: rules M * (1 + maxActive)
// output: output_feats M * C
template <typename T>
void voxelize_fp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *feats, T *output_feats, Int *rules, bool average){
voxelize_fp_cuda_<T><<<std::min(nOutputRows, (Int)32768), std::min(nPlanes, (Int)32)>>>(nOutputRows, maxActive, nPlanes, feats, output_feats, rules, average);
}
template <typename T>
__global__ void voxelize_bp_cuda_(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
for(int row = blockIdx.x; row < nOutputRows; row += gridDim.x){
T *out = d_output_feats + row * nPlanes;
Int *r = rules + row * (maxActive + 1);
Int nActive = r[0];
T multiplier = (average and nActive > 0) ? (T) 1 / nActive : (T) 1;
for(int i = 1; i <= nActive; i++){
T *inp = d_feats + r[i] * nPlanes;
for(int plane = threadIdx.x; plane < nPlanes; plane += blockDim.x){
atomicAdd(&inp[plane], multiplier * out[plane]);
}
}
}
}
template <typename T>
void voxelize_bp_cuda(Int nOutputRows, Int maxActive, Int nPlanes, T *d_output_feats, T *d_feats, Int *rules, bool average){
voxelize_bp_cuda_<T><<<std::min(nOutputRows, (Int)32768), std::min(nPlanes, (Int)32)>>>(nOutputRows, maxActive, nPlanes, d_output_feats, d_feats, rules, average);
}
|
7ab6ff6e5b8c559808fdc2b00f8307a41523ddfc.hip | // !!! This is a file automatically generated by hipify!!!
#define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <needle.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
// includes, kernels
#include <needle_kernel.cu>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period);
}
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// BEGIN ADARSH DUMMY LOOP TO INCREASE FAST FORWARD
// look at the above blosum62 array
// copy the array into dummyArray and do some stencil operations
// do some column totals and row totals
// Finally, print row totals so the compiler doesn't discard the loop as unused
int *dummyArray[48];
int i,j,k,l,ctr;
for (l=0;l<2;l++) {
for (i=0;i<48;i++)
dummyArray[i] = (int *)malloc( 48 * sizeof(int) );
for ( i=0; i<24; i++)
for ( j=0; j<24; j++){
dummyArray[i][j] = blosum62[i][j];
dummyArray[i+24][j] = blosum62[i][j];
dummyArray[i][j+24] = blosum62[i][j];
dummyArray[i+24][j+24] = blosum62[i][j];
}
for ( k=1; k<20000; k++) {
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] + dummyArray[i+1][j]) * (dummyArray[i][j+1] + dummyArray[i][j-1]);
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] * dummyArray[i+1][j]) + (dummyArray[i][j+1] * dummyArray[i][j-1]);
for (i=0;i<48; i++) {
ctr = dummyArray[0][i];
for ( j=1;j<48; j++) {
ctr += dummyArray[j][i];
dummyArray[j][i] = ctr;
}
}
for (i=0;i<48; i++) {
ctr = dummyArray[i][0];
for ( j=1;j<48; j++) {
ctr += dummyArray[i][j];
dummyArray[i][j] = ctr;
}
}
}
fprintf(stdout, "Begin dummy output\n");
for ( i=1; i<48; i++)
fprintf(stdout, "%d ", dummyArray[23][i]);
fprintf(stdout, "\nEnd of dummy output\n");
for (i=0;i<48;i++)
free(dummyArray[i]);
}
// END OF ADARSH DUMMY LOOP
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
//int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
//int size;
// the lengths of the two sequences should be divisible by 16.
// And at current stage max_rows needs to equal max_cols
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
// size = max_cols * max_rows;
//hipMalloc((void**)& referrence_cuda, sizeof(int)*size);
//hipMalloc((void**)& matrix_cuda, sizeof(int)*size);
//hipMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
#ifdef GEM5_FUSION
m5_dump_stats(0, 0);
m5_work_begin(0, 0);
#endif
//hipMemcpy(referrence_cuda, referrence, sizeof(int) * size, hipMemcpyHostToDevice);
//hipMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, hipMemcpyHostToDevice);
//REPEAT KERNEL FOR LONG GPU EXECUTION
for (int adp=0; adp<1000; adp++) {
printf("Starting GPU execution %d", adp+1);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_1), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence, input_itemsets, output_itemsets
,max_cols, penalty, i, block_width);
hipDeviceSynchronize();
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
hipLaunchKernelGGL(( needle_cuda_shared_2), dim3(dimGrid), dim3(dimBlock), 0, 0, referrence, input_itemsets, output_itemsets
,max_cols, penalty, i, block_width);
hipDeviceSynchronize();
}
}
//hipMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, hipMemcpyDeviceToHost);
output_itemsets = input_itemsets;
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
#define TRACEBACK
#ifdef TRACEBACK
#ifdef GEM5_FUSION
m5_work_begin(1, 0);
#endif
FILE *fpo = stdout;
fprintf(fpo, "print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0 && j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fprintf(fpo, "\n");
fflush(fpo);
// fclose(fpo);
#ifdef GEM5_FUSION
m5_work_end(1, 0);
#endif
#endif
// hipFree(referrence_cuda);
// hipFree(matrix_cuda);
// hipFree(matrix_cuda_out);
free(referrence);
free(input_itemsets);
//free(output_itemsets);
}
| 7ab6ff6e5b8c559808fdc2b00f8307a41523ddfc.cu | #define LIMIT -999
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <needle.h>
#include <cuda.h>
#include <sys/time.h>
// includes, kernels
#include <needle_kernel.cu>
#ifdef GEM5_FUSION
#include <stdint.h>
extern "C" {
void m5_work_begin(uint64_t workid, uint64_t threadid);
void m5_work_end(uint64_t workid, uint64_t threadid);
void m5_dump_stats(uint64_t ns_delay, uint64_t ns_period);
}
#endif
////////////////////////////////////////////////////////////////////////////////
// declaration, forward
void runTest( int argc, char** argv);
int blosum62[24][24] = {
{ 4, -1, -2, -2, 0, -1, -1, 0, -2, -1, -1, -1, -1, -2, -1, 1, 0, -3, -2, 0, -2, -1, 0, -4},
{-1, 5, 0, -2, -3, 1, 0, -2, 0, -3, -2, 2, -1, -3, -2, -1, -1, -3, -2, -3, -1, 0, -1, -4},
{-2, 0, 6, 1, -3, 0, 0, 0, 1, -3, -3, 0, -2, -3, -2, 1, 0, -4, -2, -3, 3, 0, -1, -4},
{-2, -2, 1, 6, -3, 0, 2, -1, -1, -3, -4, -1, -3, -3, -1, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{ 0, -3, -3, -3, 9, -3, -4, -3, -3, -1, -1, -3, -1, -2, -3, -1, -1, -2, -2, -1, -3, -3, -2, -4},
{-1, 1, 0, 0, -3, 5, 2, -2, 0, -3, -2, 1, 0, -3, -1, 0, -1, -2, -1, -2, 0, 3, -1, -4},
{-1, 0, 0, 2, -4, 2, 5, -2, 0, -3, -3, 1, -2, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -2, 0, -1, -3, -2, -2, 6, -2, -4, -4, -2, -3, -3, -2, 0, -2, -2, -3, -3, -1, -2, -1, -4},
{-2, 0, 1, -1, -3, 0, 0, -2, 8, -3, -3, -1, -2, -1, -2, -1, -2, -2, 2, -3, 0, 0, -1, -4},
{-1, -3, -3, -3, -1, -3, -3, -4, -3, 4, 2, -3, 1, 0, -3, -2, -1, -3, -1, 3, -3, -3, -1, -4},
{-1, -2, -3, -4, -1, -2, -3, -4, -3, 2, 4, -2, 2, 0, -3, -2, -1, -2, -1, 1, -4, -3, -1, -4},
{-1, 2, 0, -1, -3, 1, 1, -2, -1, -3, -2, 5, -1, -3, -1, 0, -1, -3, -2, -2, 0, 1, -1, -4},
{-1, -1, -2, -3, -1, 0, -2, -3, -2, 1, 2, -1, 5, 0, -2, -1, -1, -1, -1, 1, -3, -1, -1, -4},
{-2, -3, -3, -3, -2, -3, -3, -3, -1, 0, 0, -3, 0, 6, -4, -2, -2, 1, 3, -1, -3, -3, -1, -4},
{-1, -2, -2, -1, -3, -1, -1, -2, -2, -3, -3, -1, -2, -4, 7, -1, -1, -4, -3, -2, -2, -1, -2, -4},
{ 1, -1, 1, 0, -1, 0, 0, 0, -1, -2, -2, 0, -1, -2, -1, 4, 1, -3, -2, -2, 0, 0, 0, -4},
{ 0, -1, 0, -1, -1, -1, -1, -2, -2, -1, -1, -1, -1, -2, -1, 1, 5, -2, -2, 0, -1, -1, 0, -4},
{-3, -3, -4, -4, -2, -2, -3, -2, -2, -3, -2, -3, -1, 1, -4, -3, -2, 11, 2, -3, -4, -3, -2, -4},
{-2, -2, -2, -3, -2, -1, -2, -3, 2, -1, -1, -2, -1, 3, -3, -2, -2, 2, 7, -1, -3, -2, -1, -4},
{ 0, -3, -3, -3, -1, -2, -2, -3, -3, 3, 1, -2, 1, -1, -2, -2, 0, -3, -1, 4, -3, -2, -1, -4},
{-2, -1, 3, 4, -3, 0, 1, -1, 0, -3, -4, 0, -3, -3, -2, 0, -1, -4, -3, -3, 4, 1, -1, -4},
{-1, 0, 0, 1, -3, 3, 4, -2, 0, -3, -3, 1, -1, -3, -1, 0, -1, -3, -2, -2, 1, 4, -1, -4},
{ 0, -1, -1, -1, -2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -2, 0, 0, -2, -1, -1, -1, -1, -1, -4},
{-4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, -4, 1}
};
double gettime() {
struct timeval t;
gettimeofday(&t,NULL);
return t.tv_sec+t.tv_usec*1e-6;
}
////////////////////////////////////////////////////////////////////////////////
// Program main
////////////////////////////////////////////////////////////////////////////////
int
main( int argc, char** argv)
{
// BEGIN ADARSH DUMMY LOOP TO INCREASE FAST FORWARD
// look at the above blosum62 array
// copy the array into dummyArray and do some stencil operations
// do some column totals and row totals
// Finally, print row totals so the compiler doesn't discard the loop as unused
int *dummyArray[48];
int i,j,k,l,ctr;
for (l=0;l<2;l++) {
for (i=0;i<48;i++)
dummyArray[i] = (int *)malloc( 48 * sizeof(int) );
for ( i=0; i<24; i++)
for ( j=0; j<24; j++){
dummyArray[i][j] = blosum62[i][j];
dummyArray[i+24][j] = blosum62[i][j];
dummyArray[i][j+24] = blosum62[i][j];
dummyArray[i+24][j+24] = blosum62[i][j];
}
for ( k=1; k<20000; k++) {
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] + dummyArray[i+1][j]) * (dummyArray[i][j+1] + dummyArray[i][j-1]);
for ( i=1; i<47; i++)
for ( j=1; j<47; j++)
dummyArray[i][j] += (dummyArray[i-1][j] * dummyArray[i+1][j]) + (dummyArray[i][j+1] * dummyArray[i][j-1]);
for (i=0;i<48; i++) {
ctr = dummyArray[0][i];
for ( j=1;j<48; j++) {
ctr += dummyArray[j][i];
dummyArray[j][i] = ctr;
}
}
for (i=0;i<48; i++) {
ctr = dummyArray[i][0];
for ( j=1;j<48; j++) {
ctr += dummyArray[i][j];
dummyArray[i][j] = ctr;
}
}
}
fprintf(stdout, "Begin dummy output\n");
for ( i=1; i<48; i++)
fprintf(stdout, "%d ", dummyArray[23][i]);
fprintf(stdout, "\nEnd of dummy output\n");
for (i=0;i<48;i++)
free(dummyArray[i]);
}
// END OF ADARSH DUMMY LOOP
runTest( argc, argv);
return EXIT_SUCCESS;
}
void usage(int argc, char **argv)
{
fprintf(stderr, "Usage: %s <max_rows/max_cols> <penalty> \n", argv[0]);
fprintf(stderr, "\t<dimension> - x and y dimensions\n");
fprintf(stderr, "\t<penalty> - penalty(positive integer)\n");
exit(1);
}
void runTest( int argc, char** argv)
{
int max_rows, max_cols, penalty;
int *input_itemsets, *output_itemsets, *referrence;
//int *matrix_cuda, *matrix_cuda_out, *referrence_cuda;
//int size;
// the lengths of the two sequences should be divisible by 16.
// And at current stage max_rows needs to equal max_cols
if (argc == 3)
{
max_rows = atoi(argv[1]);
max_cols = atoi(argv[1]);
penalty = atoi(argv[2]);
}
else{
usage(argc, argv);
}
if(atoi(argv[1])%16!=0){
fprintf(stderr,"The dimension values must be a multiple of 16\n");
exit(1);
}
max_rows = max_rows + 1;
max_cols = max_cols + 1;
referrence = (int *)malloc( max_rows * max_cols * sizeof(int) );
input_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
output_itemsets = (int *)malloc( max_rows * max_cols * sizeof(int) );
if (!input_itemsets)
fprintf(stderr, "error: can not allocate memory");
srand ( 7 );
for (int i = 0 ; i < max_cols; i++){
for (int j = 0 ; j < max_rows; j++){
input_itemsets[i*max_cols+j] = 0;
}
}
printf("Start Needleman-Wunsch\n");
for( int i=1; i< max_rows ; i++){ //please define your own sequence.
input_itemsets[i*max_cols] = rand() % 10 + 1;
}
for( int j=1; j< max_cols ; j++){ //please define your own sequence.
input_itemsets[j] = rand() % 10 + 1;
}
for (int i = 1 ; i < max_cols; i++){
for (int j = 1 ; j < max_rows; j++){
referrence[i*max_cols+j] = blosum62[input_itemsets[i*max_cols]][input_itemsets[j]];
}
}
for( int i = 1; i< max_rows ; i++)
input_itemsets[i*max_cols] = -i * penalty;
for( int j = 1; j< max_cols ; j++)
input_itemsets[j] = -j * penalty;
// size = max_cols * max_rows;
//cudaMalloc((void**)& referrence_cuda, sizeof(int)*size);
//cudaMalloc((void**)& matrix_cuda, sizeof(int)*size);
//cudaMalloc((void**)& matrix_cuda_out, sizeof(int)*size);
#ifdef GEM5_FUSION
m5_dump_stats(0, 0);
m5_work_begin(0, 0);
#endif
//cudaMemcpy(referrence_cuda, referrence, sizeof(int) * size, cudaMemcpyHostToDevice);
//cudaMemcpy(matrix_cuda, input_itemsets, sizeof(int) * size, cudaMemcpyHostToDevice);
//REPEAT KERNEL FOR LONG GPU EXECUTION
for (int adp=0; adp<1000; adp++) {
printf("Starting GPU execution %d", adp+1);
dim3 dimGrid;
dim3 dimBlock(BLOCK_SIZE, 1);
int block_width = ( max_cols - 1 )/BLOCK_SIZE;
printf("Processing top-left matrix\n");
//process top-left matrix
for( int i = 1 ; i <= block_width ; i++){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_1<<<dimGrid, dimBlock>>>(referrence, input_itemsets, output_itemsets
,max_cols, penalty, i, block_width);
cudaThreadSynchronize();
}
printf("Processing bottom-right matrix\n");
//process bottom-right matrix
for( int i = block_width - 1 ; i >= 1 ; i--){
dimGrid.x = i;
dimGrid.y = 1;
needle_cuda_shared_2<<<dimGrid, dimBlock>>>(referrence, input_itemsets, output_itemsets
,max_cols, penalty, i, block_width);
cudaThreadSynchronize();
}
}
//cudaMemcpy(output_itemsets, matrix_cuda, sizeof(int) * size, cudaMemcpyDeviceToHost);
output_itemsets = input_itemsets;
#ifdef GEM5_FUSION
m5_work_end(0, 0);
#endif
#define TRACEBACK
#ifdef TRACEBACK
#ifdef GEM5_FUSION
m5_work_begin(1, 0);
#endif
FILE *fpo = stdout;
fprintf(fpo, "print traceback value GPU:\n");
for (int i = max_rows - 2, j = max_rows - 2; i>=0 && j>=0;){
int nw, n, w, traceback;
if ( i == max_rows - 2 && j == max_rows - 2 )
fprintf(fpo, "%d ", output_itemsets[ i * max_cols + j]); //print the first element
if ( i == 0 && j == 0 )
break;
if ( i > 0 && j > 0 ){
nw = output_itemsets[(i - 1) * max_cols + j - 1];
w = output_itemsets[ i * max_cols + j - 1 ];
n = output_itemsets[(i - 1) * max_cols + j];
}
else if ( i == 0 ){
nw = n = LIMIT;
w = output_itemsets[ i * max_cols + j - 1 ];
}
else if ( j == 0 ){
nw = w = LIMIT;
n = output_itemsets[(i - 1) * max_cols + j];
}
else{
}
//traceback = maximum(nw, w, n);
int new_nw, new_w, new_n;
new_nw = nw + referrence[i * max_cols + j];
new_w = w - penalty;
new_n = n - penalty;
traceback = maximum(new_nw, new_w, new_n);
if(traceback == new_nw)
traceback = nw;
if(traceback == new_w)
traceback = w;
if(traceback == new_n)
traceback = n;
fprintf(fpo, "%d ", traceback);
if(traceback == nw )
{i--; j--; continue;}
else if(traceback == w )
{j--; continue;}
else if(traceback == n )
{i--; continue;}
else
;
}
fprintf(fpo, "\n");
fflush(fpo);
// fclose(fpo);
#ifdef GEM5_FUSION
m5_work_end(1, 0);
#endif
#endif
// cudaFree(referrence_cuda);
// cudaFree(matrix_cuda);
// cudaFree(matrix_cuda_out);
free(referrence);
free(input_itemsets);
//free(output_itemsets);
}
|
b9e639e0b98b17c4aed53f034cbdd3c3bf7b9214.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
} | b9e639e0b98b17c4aed53f034cbdd3c3bf7b9214.cu | #include "includes.h"
__global__ void query_ball_point_gpu(int b, int n, int m, const float *radius, int nsample, const float *xyz1, const float *xyz2, int *idx) {
int batch_index = blockIdx.x;
xyz1 += n*3*batch_index;
xyz2 += m*3*batch_index;
idx += m*nsample*batch_index;
int index = threadIdx.x;
int stride = blockDim.x;
for (int j=index;j<m;j+=stride) {
int cnt = 0;
for (int k=0;k<n;++k) {
if (cnt == nsample)
break; // only pick the FIRST nsample points in the ball
float x2=xyz2[j*3+0];
float y2=xyz2[j*3+1];
float z2=xyz2[j*3+2];
float x1=xyz1[k*3+0];
float y1=xyz1[k*3+1];
float z1=xyz1[k*3+2];
float d=max(sqrtf((x2-x1)*(x2-x1)+(y2-y1)*(y2-y1)+(z2-z1)*(z2-z1)),1e-20f);
if (d<radius[0]) {
if (cnt==0) { // set ALL indices to k, s.t. if there are less points in ball than nsample, we still have valid (repeating) indices
for (int l=0;l<nsample;++l)
idx[j*nsample+l] = k;
}
idx[j*nsample+cnt] = k;
cnt+=1;
}
}
}
} |
9e2f6b25679c3a06f11aa85f85f5909322b5b104.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* jacobi2D.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
#define POLYBENCH_TIME 1
#include "jacobi2D.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
int i, j;
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
A[i][j] = ((DATA_TYPE) i*(j+2) + 10) / N;
B[i][j] = ((DATA_TYPE) (i-4)*(j-1) + 11) / N;
}
}
}
void runJacobi2DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
for (int t = 0; t < _PB_TSTEPS; t++)
{
for (int i = 1; i < _PB_N - 1; i++)
{
for (int j = 1; j < _PB_N - 1; j++)
{
B[i][j] = 0.2f * (A[i][j] + A[i][(j-1)] + A[i][(1+j)] + A[(1+i)][j] + A[(i-1)][j]);
}
}
for (int i = 1; i < _PB_N-1; i++)
{
for (int j = 1; j < _PB_N-1; j++)
{
A[i][j] = B[i][j];
}
}
}
}
__global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1)))
{
B[i*N + j] = 0.2f * (A[i*N + j] + A[i*N + (j-1)] + A[i*N + (1 + j)] + A[(1 + i)*N + j] + A[(i-1)*N + j]);
}
}
__global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1)))
{
A[i*N + j] = B[i*N + j];
}
}
void compareResults(int n, DATA_TYPE POLYBENCH_2D(a,N,N,n,n), DATA_TYPE POLYBENCH_2D(a_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(b,N,N,n,n), DATA_TYPE POLYBENCH_2D(b_outputFromGpu,N,N,n,n))
{
int i, j, fail;
fail = 0;
// Compare output from CPU and GPU
for (i=0; i<n; i++)
{
for (j=0; j<n; j++)
{
if (percentDiff(a[i][j], a_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
for (i=0; i<n; i++)
{
for (j=0; j<n; j++)
{
if (percentDiff(b[i][j], b_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void runJacobi2DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(A_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n))
{
DATA_TYPE* Agpu;
DATA_TYPE* Bgpu;
hipMalloc(&Agpu, N * N * sizeof(DATA_TYPE));
hipMalloc(&Bgpu, N * N * sizeof(DATA_TYPE));
hipMemcpy(Agpu, A, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
hipMemcpy(Bgpu, B, N * N * sizeof(DATA_TYPE), hipMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), (unsigned int)ceil( ((float)N) / ((float)block.y) ));
/* Start timer. */
polybench_start_instruments;
for (int t = 0; t < _PB_TSTEPS; t++)
{
hipLaunchKernelGGL(( runJacobiCUDA_kernel1), dim3(grid),dim3(block), 0, 0, n, Agpu, Bgpu);
hipDeviceSynchronize();
hipLaunchKernelGGL(( runJacobiCUDA_kernel2), dim3(grid),dim3(block), 0, 0, n, Agpu, Bgpu);
hipDeviceSynchronize();
}
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
hipMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost);
hipMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N * N, hipMemcpyDeviceToHost);
hipFree(Agpu);
hipFree(Bgpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
int i, j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]);
if ((i * n + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int tsteps = TSTEPS;
POLYBENCH_2D_ARRAY_DECL(a,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(b,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,N,n,n);
init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
runJacobi2DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
runJacobi2DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(a);
POLYBENCH_FREE_ARRAY(a_outputFromGpu);
POLYBENCH_FREE_ARRAY(b);
POLYBENCH_FREE_ARRAY(b_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
| 9e2f6b25679c3a06f11aa85f85f5909322b5b104.cu | /**
* jacobi2D.cu: This file is part of the PolyBench/GPU 1.0 test suite.
*
*
* Contact: Scott Grauer-Gray <[email protected]>
* Will Killian <[email protected]>
* Louis-Noel Pouchet <[email protected]>
* Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
*/
#include <stdio.h>
#include <unistd.h>
#include <time.h>
#include <sys/time.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>
#include <math.h>
#define POLYBENCH_TIME 1
#include "jacobi2D.cuh"
#include "../../common/polybench.h"
#include "../../common/polybenchUtilFuncts.h"
//define the error threshold for the results "not matching"
#define PERCENT_DIFF_ERROR_THRESHOLD 0.05
#define RUN_ON_CPU
void init_array(int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
int i, j;
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
{
A[i][j] = ((DATA_TYPE) i*(j+2) + 10) / N;
B[i][j] = ((DATA_TYPE) (i-4)*(j-1) + 11) / N;
}
}
}
void runJacobi2DCpu(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n))
{
for (int t = 0; t < _PB_TSTEPS; t++)
{
for (int i = 1; i < _PB_N - 1; i++)
{
for (int j = 1; j < _PB_N - 1; j++)
{
B[i][j] = 0.2f * (A[i][j] + A[i][(j-1)] + A[i][(1+j)] + A[(1+i)][j] + A[(i-1)][j]);
}
}
for (int i = 1; i < _PB_N-1; i++)
{
for (int j = 1; j < _PB_N-1; j++)
{
A[i][j] = B[i][j];
}
}
}
}
__global__ void runJacobiCUDA_kernel1(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1)))
{
B[i*N + j] = 0.2f * (A[i*N + j] + A[i*N + (j-1)] + A[i*N + (1 + j)] + A[(1 + i)*N + j] + A[(i-1)*N + j]);
}
}
__global__ void runJacobiCUDA_kernel2(int n, DATA_TYPE* A, DATA_TYPE* B)
{
int i = blockIdx.y * blockDim.y + threadIdx.y;
int j = blockIdx.x * blockDim.x + threadIdx.x;
if ((i >= 1) && (i < (_PB_N-1)) && (j >= 1) && (j < (_PB_N-1)))
{
A[i*N + j] = B[i*N + j];
}
}
void compareResults(int n, DATA_TYPE POLYBENCH_2D(a,N,N,n,n), DATA_TYPE POLYBENCH_2D(a_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(b,N,N,n,n), DATA_TYPE POLYBENCH_2D(b_outputFromGpu,N,N,n,n))
{
int i, j, fail;
fail = 0;
// Compare output from CPU and GPU
for (i=0; i<n; i++)
{
for (j=0; j<n; j++)
{
if (percentDiff(a[i][j], a_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
for (i=0; i<n; i++)
{
for (j=0; j<n; j++)
{
if (percentDiff(b[i][j], b_outputFromGpu[i][j]) > PERCENT_DIFF_ERROR_THRESHOLD)
{
fail++;
}
}
}
// Print results
printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail);
}
void runJacobi2DCUDA(int tsteps, int n, DATA_TYPE POLYBENCH_2D(A,N,N,n,n), DATA_TYPE POLYBENCH_2D(B,N,N,n,n), DATA_TYPE POLYBENCH_2D(A_outputFromGpu,N,N,n,n), DATA_TYPE POLYBENCH_2D(B_outputFromGpu,N,N,n,n))
{
DATA_TYPE* Agpu;
DATA_TYPE* Bgpu;
cudaMalloc(&Agpu, N * N * sizeof(DATA_TYPE));
cudaMalloc(&Bgpu, N * N * sizeof(DATA_TYPE));
cudaMemcpy(Agpu, A, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
cudaMemcpy(Bgpu, B, N * N * sizeof(DATA_TYPE), cudaMemcpyHostToDevice);
dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y);
dim3 grid((unsigned int)ceil( ((float)N) / ((float)block.x) ), (unsigned int)ceil( ((float)N) / ((float)block.y) ));
/* Start timer. */
polybench_start_instruments;
for (int t = 0; t < _PB_TSTEPS; t++)
{
runJacobiCUDA_kernel1<<<grid,block>>>(n, Agpu, Bgpu);
cudaThreadSynchronize();
runJacobiCUDA_kernel2<<<grid,block>>>(n, Agpu, Bgpu);
cudaThreadSynchronize();
}
/* Stop and print timer. */
printf("GPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
cudaMemcpy(A_outputFromGpu, Agpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost);
cudaMemcpy(B_outputFromGpu, Bgpu, sizeof(DATA_TYPE) * N * N, cudaMemcpyDeviceToHost);
cudaFree(Agpu);
cudaFree(Bgpu);
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int n,
DATA_TYPE POLYBENCH_2D(A,N,N,n,n))
{
int i, j;
for (i = 0; i < n; i++)
for (j = 0; j < n; j++) {
fprintf(stderr, DATA_PRINTF_MODIFIER, A[i][j]);
if ((i * n + j) % 20 == 0) fprintf(stderr, "\n");
}
fprintf(stderr, "\n");
}
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int n = N;
int tsteps = TSTEPS;
POLYBENCH_2D_ARRAY_DECL(a,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(b,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(a_outputFromGpu,DATA_TYPE,N,N,n,n);
POLYBENCH_2D_ARRAY_DECL(b_outputFromGpu,DATA_TYPE,N,N,n,n);
init_array(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
runJacobi2DCUDA(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b_outputFromGpu));
#ifdef RUN_ON_CPU
/* Start timer. */
polybench_start_instruments;
runJacobi2DCpu(tsteps, n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(b));
/* Stop and print timer. */
printf("CPU Time in seconds:\n");
polybench_stop_instruments;
polybench_print_instruments;
compareResults(n, POLYBENCH_ARRAY(a), POLYBENCH_ARRAY(a_outputFromGpu), POLYBENCH_ARRAY(b), POLYBENCH_ARRAY(b_outputFromGpu));
#else //prevent dead code elimination
polybench_prevent_dce(print_array(n, POLYBENCH_ARRAY(a_outputFromGpu)));
#endif //RUN_ON_CPU
POLYBENCH_FREE_ARRAY(a);
POLYBENCH_FREE_ARRAY(a_outputFromGpu);
POLYBENCH_FREE_ARRAY(b);
POLYBENCH_FREE_ARRAY(b_outputFromGpu);
return 0;
}
#include "../../common/polybench.c"
|
0f726c3d3b7bf7129d92111612ca64eedd6b7b19.hip | // !!! This is a file automatically generated by hipify!!!
// CUDA single thread
#include <hip/hip_runtime.h>
__global__
void rgb2grey_kernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
for (size_t r = 0; r < numRows; ++r) {
for (size_t c = 0; c < numCols; ++c) {
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
void rgb2grey_cuda( const uchar4* const h_rgbaImage,
unsigned char *const h_greyImage,
size_t numRows, size_t numCols)
{
uchar4 *d_rgbaImage;
unsigned char *d_greyImage;
size_t numPixels= numRows*numCols;
// Alloc memory
hipMalloc((void **) &d_rgbaImage, sizeof(uchar4) * numPixels);
hipMalloc((void **) &d_greyImage, sizeof(unsigned char) * numPixels); // one byte per grey pixel
// Copy from Host to Device
hipMemset(d_greyImage, 0, numPixels * sizeof(unsigned char));
hipMemcpy(d_rgbaImage, h_rgbaImage, sizeof(uchar4) * numPixels, hipMemcpyHostToDevice);
// Kernel Launch
hipLaunchKernelGGL(( rgb2grey_kernel), dim3(1), dim3(1), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);
// Copy from Device to Host
hipMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, hipMemcpyDeviceToHost);
// Free memory
hipDeviceSynchronize();
hipFree(d_greyImage);
hipFree(d_rgbaImage);
}
| 0f726c3d3b7bf7129d92111612ca64eedd6b7b19.cu | // CUDA single thread
#include <cuda.h>
#include <cuda_runtime.h>
__global__
void rgb2grey_kernel(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
for (size_t r = 0; r < numRows; ++r) {
for (size_t c = 0; c < numCols; ++c) {
uchar4 rgba = rgbaImage[r * numCols + c];
float channelSum = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
greyImage[r * numCols + c] = channelSum;
}
}
}
void rgb2grey_cuda( const uchar4* const h_rgbaImage,
unsigned char *const h_greyImage,
size_t numRows, size_t numCols)
{
uchar4 *d_rgbaImage;
unsigned char *d_greyImage;
size_t numPixels= numRows*numCols;
// Alloc memory
cudaMalloc((void **) &d_rgbaImage, sizeof(uchar4) * numPixels);
cudaMalloc((void **) &d_greyImage, sizeof(unsigned char) * numPixels); // one byte per grey pixel
// Copy from Host to Device
cudaMemset(d_greyImage, 0, numPixels * sizeof(unsigned char));
cudaMemcpy(d_rgbaImage, h_rgbaImage, sizeof(uchar4) * numPixels, cudaMemcpyHostToDevice);
// Kernel Launch
rgb2grey_kernel<<<1, 1>>>(d_rgbaImage, d_greyImage, numRows, numCols);
// Copy from Device to Host
cudaMemcpy(h_greyImage, d_greyImage, sizeof(unsigned char) * numPixels, cudaMemcpyDeviceToHost);
// Free memory
cudaDeviceSynchronize();
cudaFree(d_greyImage);
cudaFree(d_rgbaImage);
}
|
249228eef2d56e890de305b5df28fa0b03c18eb0.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( Slice<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS),
0, Caffe::cuda_stream(),
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
CUDA_CHECK(hipStreamSynchronize(Caffe::cuda_stream()));
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
| 249228eef2d56e890de305b5df28fa0b03c18eb0.cu | #include <vector>
#include "caffe/layers/slice_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void Slice(const int nthreads, const Dtype* in_data,
const bool forward, const int num_slices, const int slice_size,
const int bottom_slice_axis, const int top_slice_axis,
const int offset_slice_axis, Dtype* out_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int total_slice_size = slice_size * top_slice_axis;
const int slice_num = index / total_slice_size;
const int slice_index = index % total_slice_size;
const int bottom_index = slice_index +
(slice_num * bottom_slice_axis + offset_slice_axis) * slice_size;
if (forward) {
out_data[index] = in_data[bottom_index];
} else {
out_data[bottom_index] = in_data[index];
}
}
}
template <typename Dtype>
void SliceLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
if (top.size() == 1) { return; }
int offset_slice_axis = 0;
const Dtype* bottom_data = bottom[0]->gpu_data();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = true;
for (int i = 0; i < top.size(); ++i) {
Dtype* top_data = top[i]->mutable_gpu_data();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
nthreads, bottom_data, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, top_data);
offset_slice_axis += top_slice_axis;
}
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
}
template <typename Dtype>
void SliceLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
if (!propagate_down[0] || top.size() == 1) { return; }
int offset_slice_axis = 0;
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int bottom_slice_axis = bottom[0]->shape(slice_axis_);
const bool kForward = false;
for (int i = 0; i < top.size(); ++i) {
const Dtype* top_diff = top[i]->gpu_diff();
const int top_slice_axis = top[i]->shape(slice_axis_);
const int top_slice_size = top_slice_axis * slice_size_;
const int nthreads = top_slice_size * num_slices_;
Slice<Dtype> // NOLINT_NEXT_LINE(whitespace/operators)
<<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS,
0, Caffe::cuda_stream()>>>(
nthreads, top_diff, kForward, num_slices_, slice_size_,
bottom_slice_axis, top_slice_axis, offset_slice_axis, bottom_diff);
offset_slice_axis += top_slice_axis;
}
CUDA_CHECK(cudaStreamSynchronize(Caffe::cuda_stream()));
}
INSTANTIATE_LAYER_GPU_FUNCS(SliceLayer);
} // namespace caffe
|
f2871aeb0ced17814f92d96e013aecccdbbd1172.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <../src/vec/is/sf/impls/basic/sfpack.h>
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
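/* Sketch of the layout assumed below (PetscSFPackOpt is the authoritative definition):
   opt[0] = n, the number of 3D subdomains; opt[1..n+1] = prefix-sum offsets of the threads
   assigned to each subdomain; the remaining n-long arrays give, at fixed offsets, each
   subdomain's start index, local extents dx, dy and enclosing dimensions X, Y. A thread id
   is first binned into a subdomain r, then unflattened into (i,j,k) within that subdomain. */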
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt,PetscInt tid)
{
PetscInt i,j,k,m,n,r;
const PetscInt *offset,*start,*dx,*dy,*X,*Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2*n + 2;
dy = opt + 3*n + 2;
X = opt + 5*n + 2;
Y = opt + 6*n + 2;
for (r=0; r<n; r++) {if (tid < offset[r+1]) break;}
m = (tid - offset[r]);
k = m/(dx[r]*dy[r]);
j = (m - k*dx[r]*dy[r])/dx[r];
i = m - k*dx[r]*dy[r] - j*dx[r];
return (start[r] + k*X[r]*Y[r] + j*X[r] + i);
}
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals, then
<Type> is PetscReal, which is the primitive type we operate on.
<bs> is 16, which says <unit> contains 16 primitive types.
<BS> is 8, which is the maximal SIMD width we will try to vectorize operations on <unit>.
<EQ> is 0, which is (bs == BS ? 1 : 0)
If instead, <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, rendering MBS below to a compile time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
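/* For illustration only (nblocks, nthreads, stream, rootdata and rootbuf are placeholder
   names): with the 16-PetscReal unit above (bs=16, BS=8, EQ=0) a pack would be launched
   roughly as
     hipLaunchKernelGGL(HIP_KERNEL_NAME(d_Pack<PetscReal,8,0>), dim3(nblocks), dim3(nthreads),
                        0, stream, 16, count, start, opt, idx, rootdata, rootbuf);
   so each thread copies MBS = (bs/BS)*BS = 16 PetscReals per packed index. */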
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,const Type *data,Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
/* opt != NULL ==> idx == NULL, i.e., the indices follow a pattern but are not contiguous;
opt == NULL && idx == NULL ==> the indices are contiguous;
*/
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) buf[s+i] = data[t+i];
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,Type *data,const Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) op(data[t+i],buf[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,Type *leafbuf)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = tid*MBS;
for (i=0; i<MBS; i++) leafbuf[l+i] = op(rootdata[r+i],leafbuf[l+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs,PetscInt count,PetscInt srcx,PetscInt srcy,PetscInt srcX,PetscInt srcY,PetscInt srcStart,const PetscInt* srcIdx,const Type *src,PetscInt dstx,PetscInt dsty,PetscInt dstX,PetscInt dstY,PetscInt dstStart,const PetscInt *dstIdx,Type *dst)
{
PetscInt i,j,k,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid/(srcx*srcy);
j = (tid - k*srcx*srcy)/srcx;
i = tid - k*srcx*srcy - j*srcx;
s = srcStart + k*srcX*srcY + j*srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid/(dstx*dsty);
j = (tid - k*dstx*dsty)/dstx;
i = tid - k*dstx*dsty - j*dstx;
t = dstStart + k*dstX*dstY + j*dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i=0; i<MBS; i++) op(dst[t+i],src[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,PetscInt leafstart,const PetscInt *leafopt,const PetscInt *leafidx,const Type *leafdata,Type *leafupdate)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = (leafopt? MapTidToIndex(leafopt,tid) : (leafidx? leafidx[tid] : leafstart+tid))*MBS;
for (i=0; i<MBS; i++) leafupdate[l+i] = op(rootdata[r+i],leafdata[l+i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPI_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
With bs>1 and a unit > 64 bits, the current element-wise atomic approach can not guarantee the whole
insertion is atomic. Hope no user codes rely on that.
*/
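/* For illustration only (all launch parameters below are placeholders): an atomic-insertion
   unpack on doubles would instantiate the machinery above roughly as
     hipLaunchKernelGGL(HIP_KERNEL_NAME(d_UnpackAndOp<double,AtomicInsert<double>,1,1>),
                        dim3(nblocks), dim3(nthreads), 0, stream,
                        1, count, start, opt, idx, rootdata, leafbuf);
   so each element is stored through the atomicExch() overloads below rather than a plain write. */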
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((ullint*)address,__double_as_longlong(val)));}
__device__ static llint atomicExch(llint* address,llint val) {return (llint)(atomicExch((ullint*)address,(ullint)val));}
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values to the same location */
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
return old; /* The returned value may not be atomic. It can be a mix of the two ops. Caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
return op(xp[0],yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
__device__ static llint atomicAdd(llint* address,llint val) {return (llint)atomicAdd((ullint*)address,(ullint)val);}
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
ullint *address_as_ull = (ullint*)address;
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
    return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
__device__ static llint atomicMult(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val*(llint)assumed));
} while (assumed != old);
return (llint)old;
}
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static llint atomicMin(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMin(val,(llint)assumed)));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicMax(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMax(val,(llint)assumed)));
} while (assumed != old);
return (llint)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
   atomicOr() and atomicXor() are similar.
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin() above */
__device__ static llint atomicAnd(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val & (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicOr(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val | (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicXor(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val ^ (llint)assumed));
} while (assumed != old);
return (llint)old;
}
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without a definition makes any instantiation that does not use the given specializations erroneous at compile time,
   which is what we want since we only support 32-bit and 64-bit integers.
 */
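/* For example (illustration only): AtomicLogical<short,land<short>,2> has no definition, so an attempted
   AtomicLAND<short> instantiation fails at compile time instead of silently falling back to a non-atomic
   operation. */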
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
ullint *address_as_ull = (ullint*)(&x);
ullint old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note that land/lor/lxor below are different from LAND etc. above. Here we pass arguments by value and return the result of the op (not the old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
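/* Illustrative sketch (hypothetical code, guarded out; not part of the PETSc source): how the atomic logical
   functors above resolve. AtomicLAND<int> instantiates AtomicLogical<int,land<int>,4>, i.e. a 32-bit
   atomicCAS loop; with llint it would pick the 8-byte specialization instead. The kernel and its names are
   made up for illustration. */
#if 0
__global__ static void ToyAtomicLANDKernel(PetscInt n,int *flag,const int *vals)
{
  PetscInt tid = blockIdx.x*blockDim.x + threadIdx.x;
  AtomicLAND<int> op;
  if (tid < n) op(*flag,vals[tid]); /* many threads update the same flag; the CAS loop makes each update atomic */
}
#endif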
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
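/* The wrappers below cap nblocks at link->maxResidentThreadsPerGPU/nthreads; since the kernels above use
   grid-stride loops, elements beyond the resident thread capacity are covered by extra loop iterations of
   the same threads rather than by launching more blocks. */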
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,const void *data,void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
if (!opt && !idx) { /* It is a 'CUDA data to nvshmem buf' memory copy */
cerr = hipMemcpyAsync(buf,(char*)data+start*link->unitbytes,count*link->unitbytes,hipMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_Pack<Type,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(const Type*)data,(Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* To specialize UnpackAndOp for the hipMemcpyAsync() below. Usually, if this is a contiguous memcpy, we use root/leafdirect and do
   not need UnpackAndOp. Only with nvshmem do we need this 'nvshmem buf to CUDA data' memory copy.
*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Unpack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
if (!opt && !idx) { /* It is a 'nvshmem buf to CUDA data' memory copy */
cerr = hipMemcpyAsync((char*)data+start*link->unitbytes,buf,count*link->unitbytes,hipMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_UnpackAndOp<Type,Insert<Type>,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_UnpackAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,void *buf)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_FetchAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,start,iarray,idx,(Type*)data,(Type*)buf);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscInt srcx=0,srcy=0,srcX=0,srcY=0,dstx=0,dsty=0,dstX=0,dstY=0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
  /* The 3D shape of the source subdomain may differ from that of the destination, which makes it difficult to use a CUDA 3D grid and block */
if (srcOpt) {srcx = srcOpt->dx[0]; srcy = srcOpt->dy[0]; srcX = srcOpt->X[0]; srcY = srcOpt->Y[0]; srcStart = srcOpt->start[0]; srcIdx = NULL;}
else if (!srcIdx) {srcx = srcX = count; srcy = srcY = 1;}
if (dstOpt) {dstx = dstOpt->dx[0]; dsty = dstOpt->dy[0]; dstX = dstOpt->X[0]; dstY = dstOpt->Y[0]; dstStart = dstOpt->start[0]; dstIdx = NULL;}
else if (!dstIdx) {dstx = dstX = count; dsty = dstY = 1;}
hipLaunchKernelGGL(( d_ScatterAndOp<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,srcx,srcy,srcX,srcY,srcStart,srcIdx,(const Type*)src,dstx,dsty,dstX,dstY,dstStart,dstIdx,(Type*)dst);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/* Specialization for Insert since we may use hipMemcpyAsync */
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
cerr = hipMemcpyAsync((Type*)dst+dstStart*link->bs,(const Type*)src+srcStart*link->bs,count*link->unitbytes,hipMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
ierr = ScatterAndOp<Type,Insert<Type>,BS,EQ>(link,count,srcStart,srcOpt,srcIdx,src,dstStart,dstOpt,dstIdx,dst);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link,PetscInt count,PetscInt rootstart,PetscSFPackOpt rootopt,const PetscInt *rootidx,void *rootdata,PetscInt leafstart,PetscSFPackOpt leafopt,const PetscInt *leafidx,const void *leafdata,void *leafupdate)
{
hipError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
hipLaunchKernelGGL(( d_FetchAndOpLocal<Type,Op,BS,EQ>), dim3(nblocks),dim3(nthreads),0,link->stream, link->bs,count,rootstart,rarray,rootidx,(Type*)rootdata,leafstart,larray,leafidx,(const Type*)leafdata,(Type*)leafupdate);
cerr = hipGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp <Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp <Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type ,BS,EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp <Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal <Type,Add <Type> ,BS,EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal <Type,AtomicAdd<Type> ,BS,EQ>;
}
/* We use this templated class so that it can be specialized for char integers */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link)
{
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type,AtomicAdd<Type>,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
  static void Init(PetscSFLink link) {/* Nothing to do; this leaves the function pointers NULL */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type,Max<Type> ,BS,EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type>,BS,EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
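/* These pair layouts mirror MPI_2INT and MPIU_2INT; PetscSFLinkSetUp_CUDA below selects them and pairs them
   with the Minloc/Maxloc functors, which follow MPI_MINLOC/MPI_MAXLOC tie-breaking (the smaller index wins on ties). */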
template<typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = Unpack<Type,1,1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
link->d_ScatterAndInsert = ScatterAndOp<Type,Insert<Type>,1,1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type,Maxloc<Type>,1,1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type,Minloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_CUDA(PetscSFLink link)
{
hipError_t cerr;
PetscFunctionBegin;
cerr = hipDeviceSynchronize();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkSyncStream_CUDA(PetscSFLink link)
{
hipError_t cerr;
PetscFunctionBegin;
cerr = hipStreamSynchronize(link->stream);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkMemcpy_CUDA(PetscSFLink link,PetscMemType dstmtype,void* dst,PetscMemType srcmtype,const void*src,size_t n)
{
PetscFunctionBegin;
enum hipMemcpyKind kinds[2][2] = {{hipMemcpyHostToHost,hipMemcpyHostToDevice},{hipMemcpyDeviceToHost,hipMemcpyDeviceToDevice}};
if (n) {
if (PetscMemTypeHost(dstmtype) && PetscMemTypeHost(srcmtype)) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscErrorCode ierr = PetscMemcpy(dst,src,n);CHKERRQ(ierr);
} else {
int stype = PetscMemTypeDevice(srcmtype) ? 1 : 0;
int dtype = PetscMemTypeDevice(dstmtype) ? 1 : 0;
hipError_t cerr = hipMemcpyAsync(dst,src,n,kinds[stype][dtype],link->stream);CHKERRCUDA(cerr);
}
}
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFMalloc_CUDA(PetscMemType mtype,size_t size,void** ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscMalloc(size,ptr);CHKERRQ(ierr);}
else if (PetscMemTypeDevice(mtype)) {
PetscErrorCode ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr);
hipError_t err = hipMalloc(ptr,size);CHKERRCUDA(err);
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFFree_CUDA(PetscMemType mtype,void* ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscFree(ptr);CHKERRQ(ierr);}
else if (PetscMemTypeDevice(mtype)) {hipError_t err = hipFree(ptr);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d",(int)mtype);
PetscFunctionReturn(0);
}
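/* Illustrative usage sketch (hypothetical routine, guarded out; not part of the library): allocate and free a
   device buffer through the wrappers above. */
#if 0
static PetscErrorCode ToyDeviceBufferExample(void)
{
  PetscErrorCode ierr;
  void           *buf = NULL;
  PetscFunctionBegin;
  ierr = PetscSFMalloc_CUDA(PETSC_MEMTYPE_DEVICE,256*sizeof(PetscScalar),&buf);CHKERRQ(ierr);
  /* ... use buf on the device ... */
  ierr = PetscSFFree_CUDA(PETSC_MEMTYPE_DEVICE,buf);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif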
/* Destructor when the link uses MPI for communication on CUDA device */
static PetscErrorCode PetscSFLinkDestroy_MPI_CUDA(PetscSF sf,PetscSFLink link)
{
hipError_t cerr;
PetscFunctionBegin;
for (int i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
cerr = hipFree(link->rootbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
cerr = hipFree(link->leafbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFLinkSetUp_CUDA(PetscSF sf,PetscSFLink link,MPI_Datatype unit)
{
PetscErrorCode ierr;
hipError_t cerr;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
  /* MPI_CHAR is treated below as a dumb type that does not support reduction according to the MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt && sizeof(PetscInt) == sizeof(llint)) {
if (nPetscInt == 8) PackInit_IntegerType<llint,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<llint,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<llint,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<llint,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<llint,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<llint,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<llint,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<llint,1,0>(link);
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRMPI(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
    if (nbyte % sizeof(int)) { /* If the type size is not a multiple of int */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct hipDeviceProp_t props;
cerr = hipGetDevice(&device);CHKERRCUDA(cerr);
cerr = hipGetDeviceProperties(&props,device);CHKERRCUDA(cerr);
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor*props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->stream = PetscDefaultCudaStream;
link->Destroy = PetscSFLinkDestroy_MPI_CUDA;
link->SyncDevice = PetscSFLinkSyncDevice_CUDA;
link->SyncStream = PetscSFLinkSyncStream_CUDA;
link->Memcpy = PetscSFLinkMemcpy_CUDA;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
| f2871aeb0ced17814f92d96e013aecccdbbd1172.cu | #include <../src/vec/is/sf/impls/basic/sfpack.h>
/* Map a thread id to an index in root/leaf space through a series of 3D subdomains. See PetscSFPackOpt. */
__device__ static inline PetscInt MapTidToIndex(const PetscInt *opt,PetscInt tid)
{
PetscInt i,j,k,m,n,r;
const PetscInt *offset,*start,*dx,*dy,*X,*Y;
n = opt[0];
offset = opt + 1;
start = opt + n + 2;
dx = opt + 2*n + 2;
dy = opt + 3*n + 2;
X = opt + 5*n + 2;
Y = opt + 6*n + 2;
for (r=0; r<n; r++) {if (tid < offset[r+1]) break;}
m = (tid - offset[r]);
k = m/(dx[r]*dy[r]);
j = (m - k*dx[r]*dy[r])/dx[r];
i = m - k*dx[r]*dy[r] - j*dx[r];
return (start[r] + k*X[r]*Y[r] + j*X[r] + i);
}
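/* Layout of opt[] as used by the indexing above (a sketch inferred from this routine; see the PetscSFPackOpt
   definition for the authoritative description):
     opt[0]                n        number of 3D subdomains
     opt[1 .. n+1]         offset   prefix sums of subdomain sizes, offset[n] = total count
     opt[n+2 .. 2n+1]      start    first index of each subdomain in root/leaf space
     opt[2n+2 .. 3n+1]     dx       subdomain extent in x
     opt[3n+2 .. 4n+1]     dy       subdomain extent in y
     opt[5n+2 .. 6n+1]     X        enclosing domain extent in x
     opt[6n+2 .. 7n+1]     Y        enclosing domain extent in y
   (the slots at 4n+2 .. 5n+1 are not referenced here). A thread id is first located in a subdomain r via
   offset[], decomposed into (i,j,k) within that subdomain, and then mapped to the linear index
   start[r] + k*X[r]*Y[r] + j*X[r] + i in the enclosing domain. */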
/*====================================================================================*/
/* Templated CUDA kernels for pack/unpack. The Op can be regular or atomic */
/*====================================================================================*/
/* Suppose a user calls PetscSFReduce(sf,unit,...) and <unit> is an MPI data type made of 16 PetscReals; then
   <Type> is PetscReal, which is the primitive type we operate on.
   <bs> is 16, which says <unit> contains 16 primitive types.
   <BS> is 8, which is the maximal SIMD width with which we try to vectorize operations on <unit>.
   <EQ> is 0, which is (bs == BS ? 1 : 0).
   If instead <unit> has 8 PetscReals, then bs=8, BS=8, EQ=1, making MBS below a compile-time constant.
For the common case in VecScatter, bs=1, BS=1, EQ=1, MBS=1, the inner for-loops below will be totally unrolled.
*/
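/* A worked example (illustration only): for a unit of 12 PetscReals, PetscSFLinkSetUp_CUDA picks
   PackInit_RealType<PetscReal,4,0> since 12%8 != 0 but 12%4 == 0, so in the kernels below Type=PetscReal,
   BS=4, EQ=0 and bs=12, hence M=3 and MBS=12: each thread moves one whole unit of 12 reals. */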
template<class Type,PetscInt BS,PetscInt EQ>
__global__ static void d_Pack(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,const Type *data,Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS; /* If EQ, then M=1 enables compiler's const-propagation */
const PetscInt MBS = M*BS; /* MBS=bs. We turn MBS into a compile-time const when EQ=1. */
for (; tid<count; tid += grid_size) {
    /* opt != NULL ==> idx == NULL, i.e., the indices have patterns but are not contiguous;
       opt == NULL && idx == NULL ==> the indices are contiguous;
     */
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) buf[s+i] = data[t+i];
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_UnpackAndOp(PetscInt bs,PetscInt count,PetscInt start,const PetscInt *opt,const PetscInt *idx,Type *data,const Type *buf)
{
PetscInt i,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
t = (opt? MapTidToIndex(opt,tid) : (idx? idx[tid] : start+tid))*MBS;
s = tid*MBS;
for (i=0; i<MBS; i++) op(data[t+i],buf[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOp(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,Type *leafbuf)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = tid*MBS;
for (i=0; i<MBS; i++) leafbuf[l+i] = op(rootdata[r+i],leafbuf[l+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_ScatterAndOp(PetscInt bs,PetscInt count,PetscInt srcx,PetscInt srcy,PetscInt srcX,PetscInt srcY,PetscInt srcStart,const PetscInt* srcIdx,const Type *src,PetscInt dstx,PetscInt dsty,PetscInt dstX,PetscInt dstY,PetscInt dstStart,const PetscInt *dstIdx,Type *dst)
{
PetscInt i,j,k,s,t,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
if (!srcIdx) { /* src is either contiguous or 3D */
k = tid/(srcx*srcy);
j = (tid - k*srcx*srcy)/srcx;
i = tid - k*srcx*srcy - j*srcx;
s = srcStart + k*srcX*srcY + j*srcX + i;
} else {
s = srcIdx[tid];
}
if (!dstIdx) { /* dst is either contiguous or 3D */
k = tid/(dstx*dsty);
j = (tid - k*dstx*dsty)/dstx;
i = tid - k*dstx*dsty - j*dstx;
t = dstStart + k*dstX*dstY + j*dstX + i;
} else {
t = dstIdx[tid];
}
s *= MBS;
t *= MBS;
for (i=0; i<MBS; i++) op(dst[t+i],src[s+i]);
}
}
template<class Type,class Op,PetscInt BS,PetscInt EQ>
__global__ static void d_FetchAndOpLocal(PetscInt bs,PetscInt count,PetscInt rootstart,const PetscInt *rootopt,const PetscInt *rootidx,Type *rootdata,PetscInt leafstart,const PetscInt *leafopt,const PetscInt *leafidx,const Type *leafdata,Type *leafupdate)
{
PetscInt i,l,r,tid = blockIdx.x*blockDim.x + threadIdx.x;
const PetscInt grid_size = gridDim.x * blockDim.x;
const PetscInt M = (EQ) ? 1 : bs/BS, MBS = M*BS;
Op op;
for (; tid<count; tid += grid_size) {
r = (rootopt? MapTidToIndex(rootopt,tid) : (rootidx? rootidx[tid] : rootstart+tid))*MBS;
l = (leafopt? MapTidToIndex(leafopt,tid) : (leafidx? leafidx[tid] : leafstart+tid))*MBS;
for (i=0; i<MBS; i++) leafupdate[l+i] = op(rootdata[r+i],leafdata[l+i]);
}
}
/*====================================================================================*/
/* Regular operations on device */
/*====================================================================================*/
template<typename Type> struct Insert {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = y; return old;}};
template<typename Type> struct Add {__device__ Type operator() (Type& x,Type y) const {Type old = x; x += y; return old;}};
template<typename Type> struct Mult {__device__ Type operator() (Type& x,Type y) const {Type old = x; x *= y; return old;}};
template<typename Type> struct Min {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMin(x,y); return old;}};
template<typename Type> struct Max {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = PetscMax(x,y); return old;}};
template<typename Type> struct LAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x && y; return old;}};
template<typename Type> struct LOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x || y; return old;}};
template<typename Type> struct LXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = !x != !y; return old;}};
template<typename Type> struct BAND {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x & y; return old;}};
template<typename Type> struct BOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x | y; return old;}};
template<typename Type> struct BXOR {__device__ Type operator() (Type& x,Type y) const {Type old = x; x = x ^ y; return old;}};
template<typename Type> struct Minloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a < x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b);
return old;
}
};
template<typename Type> struct Maxloc {
__device__ Type operator() (Type& x,Type y) const {
Type old = x;
if (y.a > x.a) x = y;
else if (y.a == x.a) x.b = min(x.b,y.b); /* See MPI MAXLOC */
return old;
}
};
/*====================================================================================*/
/* Atomic operations on device */
/*====================================================================================*/
/*
Atomic Insert (exchange) operations
CUDA C Programming Guide V10.1 Chapter B.12.1.3:
int atomicExch(int* address, int val);
unsigned int atomicExch(unsigned int* address, unsigned int val);
unsigned long long int atomicExch(unsigned long long int* address, unsigned long long int val);
float atomicExch(float* address, float val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory and stores val back to memory at the same address. These two operations are
performed in one atomic transaction. The function returns old.
PETSc notes:
It may be useful in PetscSFFetchAndOp with op = MPI_REPLACE.
VecScatter with multiple entries scattered to the same location using INSERT_VALUES does not need
atomic insertion, since it does not need the old value. A 32-bit or 64-bit store instruction should
be atomic itself.
   With bs>1 and a unit > 64 bits, the current element-wise atomic approach cannot guarantee that the whole
   insertion is atomic. We hope no user code relies on that.
*/
__device__ static double atomicExch(double* address,double val) {return __longlong_as_double(atomicExch((ullint*)address,__double_as_longlong(val)));}
__device__ static llint atomicExch(llint* address,llint val) {return (llint)(atomicExch((ullint*)address,(ullint)val));}
template<typename Type> struct AtomicInsert {__device__ Type operator() (Type& x,Type y) const {return atomicExch(&x,y);}};
#if defined(PETSC_HAVE_COMPLEX)
#if defined(PETSC_USE_REAL_DOUBLE)
/* CUDA does not support 128-bit atomics. Users should not insert different 128-bit PetscComplex values into the same location */
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
    return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#elif defined(PETSC_USE_REAL_SINGLE)
template<> struct AtomicInsert<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
double *xp = (double*)&x,*yp = (double*)&y;
AtomicInsert<double> op;
return op(xp[0],yp[0]);
}
};
#endif
#endif
/*
Atomic add operations
CUDA C Programming Guide V10.1 Chapter B.12.1.1:
int atomicAdd(int* address, int val);
unsigned int atomicAdd(unsigned int* address,unsigned int val);
unsigned long long int atomicAdd(unsigned long long int* address,unsigned long long int val);
float atomicAdd(float* address, float val);
double atomicAdd(double* address, double val);
__half2 atomicAdd(__half2 *address, __half2 val);
__half atomicAdd(__half *address, __half val);
reads the 16-bit, 32-bit or 64-bit word old located at the address address in global or shared memory, computes (old + val),
and stores the result back to memory at the same address. These three operations are performed in one atomic transaction. The
function returns old.
The 32-bit floating-point version of atomicAdd() is only supported by devices of compute capability 2.x and higher.
The 64-bit floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and higher.
The 32-bit __half2 floating-point version of atomicAdd() is only supported by devices of compute capability 6.x and
higher. The atomicity of the __half2 add operation is guaranteed separately for each of the two __half elements;
the entire __half2 is not guaranteed to be atomic as a single 32-bit access.
The 16-bit __half floating-point version of atomicAdd() is only supported by devices of compute capability 7.x and higher.
*/
__device__ static llint atomicAdd(llint* address,llint val) {return (llint)atomicAdd((ullint*)address,(ullint)val);}
template<typename Type> struct AtomicAdd {__device__ Type operator() (Type& x,Type y) const {return atomicAdd(&x,y);}};
template<> struct AtomicAdd<double> {
__device__ double operator() (double& x,double y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 600)
return atomicAdd(&x,y);
#else
double *address = &x, val = y;
ullint *address_as_ull = (ullint*)address;
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val + __longlong_as_double(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __longlong_as_double(old);
#endif
}
};
template<> struct AtomicAdd<float> {
__device__ float operator() (float& x,float y) const {
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ >= 200)
return atomicAdd(&x,y);
#else
float *address = &x, val = y;
int *address_as_int = (int*)address;
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val + __int_as_float(assumed)));
/* Note: uses integer comparison to avoid hang in case of NaN (since NaN !=NaN) */
} while (assumed != old);
return __int_as_float(old);
#endif
}
};
#if defined(PETSC_HAVE_COMPLEX)
template<> struct AtomicAdd<PetscComplex> {
__device__ PetscComplex operator() (PetscComplex& x,PetscComplex y) const {
PetscComplex old, *z = &old;
PetscReal *xp = (PetscReal*)&x,*yp = (PetscReal*)&y;
AtomicAdd<PetscReal> op;
z[0] = op(xp[0],yp[0]);
z[1] = op(xp[1],yp[1]);
    return old; /* The returned value may not be atomic. It can be a mix of the two ops. The caller should discard it. */
}
};
#endif
/*
Atomic Mult operations:
CUDA has no atomicMult at all, so we build our own with atomicCAS
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMult(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
/* Other threads can access and modify value of *address_as_ull after the read above and before the write below */
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val*__longlong_as_double(assumed)));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMult(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(val*__int_as_float(assumed)));
} while (assumed != old);
return __int_as_float(old);
}
#endif
__device__ static int atomicMult(int* address,int val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, val*assumed);
} while (assumed != old);
return (int)old;
}
__device__ static llint atomicMult(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val*(llint)assumed));
} while (assumed != old);
return (llint)old;
}
template<typename Type> struct AtomicMult {__device__ Type operator() (Type& x,Type y) const {return atomicMult(&x,y);}};
/*
Atomic Min/Max operations
CUDA C Programming Guide V10.1 Chapter B.12.1.4~5:
int atomicMin(int* address, int val);
unsigned int atomicMin(unsigned int* address,unsigned int val);
unsigned long long int atomicMin(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes the minimum of old and val, and stores the result back to memory
at the same address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicMin() is only supported by devices of compute capability 3.5 and higher.
atomicMax() is similar.
*/
#if defined(PETSC_USE_REAL_DOUBLE)
__device__ static double atomicMin(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMin(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
__device__ static double atomicMax(double* address, double val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, __double_as_longlong(PetscMax(val,__longlong_as_double(assumed))));
} while (assumed != old);
return __longlong_as_double(old);
}
#elif defined(PETSC_USE_REAL_SINGLE)
__device__ static float atomicMin(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMin(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
__device__ static float atomicMax(float* address,float val)
{
int *address_as_int = (int*)(address);
int old = *address_as_int, assumed;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, __float_as_int(PetscMax(val,__int_as_float(assumed))));
} while (assumed != old);
return __int_as_float(old);
}
#endif
/*
atomicMin/Max(long long *, long long) are not in Nvidia's documentation. But on OLCF Summit we found
atomicMin/Max/And/Or/Xor(long long *, long long) in /sw/summit/cuda/10.1.243/include/sm_32_atomic_functions.h.
This causes compilation errors with pgi compilers and 64-bit indices:
error: function "atomicMin(long long *, long long)" has already been defined
So we add extra conditions defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320)
__device__ static llint atomicMin(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMin(val,(llint)assumed)));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicMax(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(PetscMax(val,(llint)assumed)));
} while (assumed != old);
return (llint)old;
}
#endif
template<typename Type> struct AtomicMin {__device__ Type operator() (Type& x,Type y) const {return atomicMin(&x,y);}};
template<typename Type> struct AtomicMax {__device__ Type operator() (Type& x,Type y) const {return atomicMax(&x,y);}};
/*
Atomic bitwise operations
CUDA C Programming Guide V10.1 Chapter B.12.2.1 ~ B.12.2.3:
int atomicAnd(int* address, int val);
unsigned int atomicAnd(unsigned int* address,unsigned int val);
unsigned long long int atomicAnd(unsigned long long int* address,unsigned long long int val);
reads the 32-bit or 64-bit word old located at the address address in global or shared
memory, computes (old & val), and stores the result back to memory at the same
address. These three operations are performed in one atomic transaction.
The function returns old.
The 64-bit version of atomicAnd() is only supported by devices of compute capability 3.5 and higher.
   atomicOr() and atomicXor() are similar.
*/
#if defined(__CUDA_ARCH__) && (__CUDA_ARCH__ < 320) /* Why 320? see comments at atomicMin() above */
__device__ static llint atomicAnd(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val & (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicOr(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val | (llint)assumed));
} while (assumed != old);
return (llint)old;
}
__device__ static llint atomicXor(llint* address,llint val)
{
ullint *address_as_ull = (ullint*)(address);
ullint old = *address_as_ull, assumed;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(val ^ (llint)assumed));
} while (assumed != old);
return (llint)old;
}
#endif
template<typename Type> struct AtomicBAND {__device__ Type operator() (Type& x,Type y) const {return atomicAnd(&x,y);}};
template<typename Type> struct AtomicBOR {__device__ Type operator() (Type& x,Type y) const {return atomicOr (&x,y);}};
template<typename Type> struct AtomicBXOR {__device__ Type operator() (Type& x,Type y) const {return atomicXor(&x,y);}};
/*
Atomic logical operations:
CUDA has no atomic logical operations at all. We support them on integer types.
*/
/* A template without a definition makes any instantiation that does not use the given specializations erroneous at compile time,
   which is what we want since we only support 32-bit and 64-bit integers.
*/
template<typename Type,class Op,int size/* sizeof(Type) */> struct AtomicLogical;
template<typename Type,class Op>
struct AtomicLogical<Type,Op,4> {
__device__ Type operator()(Type& x,Type y) const {
int *address_as_int = (int*)(&x);
int old = *address_as_int, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_int, assumed, (int)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
template<typename Type,class Op>
struct AtomicLogical<Type,Op,8> {
__device__ Type operator()(Type& x,Type y) const {
ullint *address_as_ull = (ullint*)(&x);
ullint old = *address_as_ull, assumed;
Op op;
do {
assumed = old;
old = atomicCAS(address_as_ull, assumed, (ullint)(op((Type)assumed,y)));
} while (assumed != old);
return (Type)old;
}
};
/* Note that land/lor/lxor below are different from LAND etc. above. Here we pass arguments by value and return the result of the op (not the old value) */
template<typename Type> struct land {__device__ Type operator()(Type x, Type y) {return x && y;}};
template<typename Type> struct lor {__device__ Type operator()(Type x, Type y) {return x || y;}};
template<typename Type> struct lxor {__device__ Type operator()(Type x, Type y) {return (!x != !y);}};
template<typename Type> struct AtomicLAND {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,land<Type>,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lor<Type> ,sizeof(Type)> op; return op(x,y);}};
template<typename Type> struct AtomicLXOR {__device__ Type operator()(Type& x,Type y) const {AtomicLogical<Type,lxor<Type>,sizeof(Type)> op; return op(x,y);}};
/*====================================================================================*/
/* Wrapper functions of cuda kernels. Function pointers are stored in 'link' */
/*====================================================================================*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Pack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,const void *data,void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
if (!opt && !idx) { /* It is a 'CUDA data to nvshmem buf' memory copy */
cerr = cudaMemcpyAsync(buf,(char*)data+start*link->unitbytes,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_Pack<Type,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(const Type*)data,(Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* To specialize UnpackAndOp for the cudaMemcpyAsync() below. Usually, if this is a contiguous memcpy, we use root/leafdirect and do
   not need UnpackAndOp. Only with nvshmem do we need this 'nvshmem buf to CUDA data' memory copy.
*/
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode Unpack(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
if (!opt && !idx) { /* It is a 'nvshmem buf to CUDA data' memory copy */
cerr = cudaMemcpyAsync((char*)data+start*link->unitbytes,buf,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_UnpackAndOp<Type,Insert<Type>,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode UnpackAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,const void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_UnpackAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(const Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOp(PetscSFLink link,PetscInt count,PetscInt start,PetscSFPackOpt opt,const PetscInt *idx,void *data,void *buf)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *iarray=opt ? opt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_FetchAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,start,iarray,idx,(Type*)data,(Type*)buf);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndOp(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
PetscInt srcx=0,srcy=0,srcX=0,srcY=0,dstx=0,dsty=0,dstX=0,dstY=0;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
/* The 3D shape of source subdomain may be different than that of the destination, which makes it difficult to use CUDA 3D grid and block */
if (srcOpt) {srcx = srcOpt->dx[0]; srcy = srcOpt->dy[0]; srcX = srcOpt->X[0]; srcY = srcOpt->Y[0]; srcStart = srcOpt->start[0]; srcIdx = NULL;}
else if (!srcIdx) {srcx = srcX = count; srcy = srcY = 1;}
if (dstOpt) {dstx = dstOpt->dx[0]; dsty = dstOpt->dy[0]; dstX = dstOpt->X[0]; dstY = dstOpt->Y[0]; dstStart = dstOpt->start[0]; dstIdx = NULL;}
else if (!dstIdx) {dstx = dstX = count; dsty = dstY = 1;}
d_ScatterAndOp<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,srcx,srcy,srcX,srcY,srcStart,srcIdx,(const Type*)src,dstx,dsty,dstX,dstY,dstStart,dstIdx,(Type*)dst);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/* Specialization for Insert since we may use cudaMemcpyAsync */
template<typename Type,PetscInt BS,PetscInt EQ>
static PetscErrorCode ScatterAndInsert(PetscSFLink link,PetscInt count,PetscInt srcStart,PetscSFPackOpt srcOpt,const PetscInt *srcIdx,const void *src,PetscInt dstStart,PetscSFPackOpt dstOpt,const PetscInt *dstIdx,void *dst)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
/*src and dst are contiguous */
if ((!srcOpt && !srcIdx) && (!dstOpt && !dstIdx) && src != dst) {
cerr = cudaMemcpyAsync((Type*)dst+dstStart*link->bs,(const Type*)src+srcStart*link->bs,count*link->unitbytes,cudaMemcpyDeviceToDevice,link->stream);CHKERRCUDA(cerr);
} else {
ierr = ScatterAndOp<Type,Insert<Type>,BS,EQ>(link,count,srcStart,srcOpt,srcIdx,src,dstStart,dstOpt,dstIdx,dst);CHKERRQ(ierr);
}
PetscFunctionReturn(0);
}
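/* Local (self-to-self) fetch-and-op: judging from the d_FetchAndOpLocal kernel arguments, leafupdate
   receives the old root values while the roots are combined with leafdata via Op */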
template<typename Type,class Op,PetscInt BS,PetscInt EQ>
static PetscErrorCode FetchAndOpLocal(PetscSFLink link,PetscInt count,PetscInt rootstart,PetscSFPackOpt rootopt,const PetscInt *rootidx,void *rootdata,PetscInt leafstart,PetscSFPackOpt leafopt,const PetscInt *leafidx,const void *leafdata,void *leafupdate)
{
cudaError_t cerr;
PetscInt nthreads=256;
PetscInt nblocks=(count+nthreads-1)/nthreads;
const PetscInt *rarray = rootopt ? rootopt->array : NULL;
const PetscInt *larray = leafopt ? leafopt->array : NULL;
PetscFunctionBegin;
if (!count) PetscFunctionReturn(0);
nblocks = PetscMin(nblocks,link->maxResidentThreadsPerGPU/nthreads);
d_FetchAndOpLocal<Type,Op,BS,EQ><<<nblocks,nthreads,0,link->stream>>>(link->bs,count,rootstart,rarray,rootidx,(Type*)rootdata,leafstart,larray,leafidx,(const Type*)leafdata,(Type*)leafupdate);
cerr = cudaGetLastError();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
/*====================================================================================*/
/* Init various types and instantiate pack/unpack function pointers */
/*====================================================================================*/
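/* Each PackInit_* routine below fills link's device function-pointer table for one family of unit types:
   d_* entries are the plain kernels, da_* entries the atomic variants used when data races are possible */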
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_RealType(PetscSFLink link)
{
/* Pack/unpack for remote communication */
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp <Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp <Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
/* Scatter for local communication */
link->d_ScatterAndInsert = ScatterAndInsert<Type ,BS,EQ>; /* Has special optimizations */
link->d_ScatterAndAdd = ScatterAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp <Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp <Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp <Type,Max<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal <Type,Add <Type> ,BS,EQ>;
/* Atomic versions when there are data-race possibilities */
link->da_UnpackAndInsert = UnpackAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp <Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp <Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp <Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp <Type,AtomicMax<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal <Type,AtomicAdd<Type> ,BS,EQ>;
}
/* Use a templated class here so that the char (1-byte) integer case can be specialized below */
template<typename Type,PetscInt BS,PetscInt EQ,PetscInt size/*sizeof(Type)*/>
struct PackInit_IntegerType_Atomic {
static void Init(PetscSFLink link)
{
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_UnpackAndMult = UnpackAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_UnpackAndMin = UnpackAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_UnpackAndMax = UnpackAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_UnpackAndLAND = UnpackAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_UnpackAndLOR = UnpackAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_UnpackAndLXOR = UnpackAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_UnpackAndBAND = UnpackAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_UnpackAndBOR = UnpackAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_UnpackAndBXOR = UnpackAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAdd = FetchAndOp <Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type> ,BS,EQ>;
link->da_ScatterAndMult = ScatterAndOp<Type,AtomicMult<Type> ,BS,EQ>;
link->da_ScatterAndMin = ScatterAndOp<Type,AtomicMin<Type> ,BS,EQ>;
link->da_ScatterAndMax = ScatterAndOp<Type,AtomicMax<Type> ,BS,EQ>;
link->da_ScatterAndLAND = ScatterAndOp<Type,AtomicLAND<Type> ,BS,EQ>;
link->da_ScatterAndLOR = ScatterAndOp<Type,AtomicLOR<Type> ,BS,EQ>;
link->da_ScatterAndLXOR = ScatterAndOp<Type,AtomicLXOR<Type> ,BS,EQ>;
link->da_ScatterAndBAND = ScatterAndOp<Type,AtomicBAND<Type> ,BS,EQ>;
link->da_ScatterAndBOR = ScatterAndOp<Type,AtomicBOR<Type> ,BS,EQ>;
link->da_ScatterAndBXOR = ScatterAndOp<Type,AtomicBXOR<Type> ,BS,EQ>;
link->da_FetchAndAddLocal = FetchAndOpLocal<Type,AtomicAdd<Type>,BS,EQ>;
}
};
/* CUDA does not support atomics on chars. It is TBD in PETSc. */
template<typename Type,PetscInt BS,PetscInt EQ>
struct PackInit_IntegerType_Atomic<Type,BS,EQ,1> {
  static void Init(PetscSFLink link) {/* Nothing to do; leave the function pointers NULL */}
};
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_IntegerType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_UnpackAndMin = UnpackAndOp<Type,Min<Type> ,BS,EQ>;
link->d_UnpackAndMax = UnpackAndOp<Type,Max<Type> ,BS,EQ>;
link->d_UnpackAndLAND = UnpackAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_UnpackAndLOR = UnpackAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_UnpackAndLXOR = UnpackAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_UnpackAndBAND = UnpackAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_UnpackAndBOR = UnpackAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_UnpackAndBXOR = UnpackAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_ScatterAndMin = ScatterAndOp<Type,Min<Type> ,BS,EQ>;
link->d_ScatterAndMax = ScatterAndOp<Type,Max<Type> ,BS,EQ>;
link->d_ScatterAndLAND = ScatterAndOp<Type,LAND<Type> ,BS,EQ>;
link->d_ScatterAndLOR = ScatterAndOp<Type,LOR<Type> ,BS,EQ>;
link->d_ScatterAndLXOR = ScatterAndOp<Type,LXOR<Type> ,BS,EQ>;
link->d_ScatterAndBAND = ScatterAndOp<Type,BAND<Type> ,BS,EQ>;
link->d_ScatterAndBOR = ScatterAndOp<Type,BOR<Type> ,BS,EQ>;
link->d_ScatterAndBXOR = ScatterAndOp<Type,BXOR<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
PackInit_IntegerType_Atomic<Type,BS,EQ,sizeof(Type)>::Init(link);
}
#if defined(PETSC_HAVE_COMPLEX)
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_ComplexType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_UnpackAndAdd = UnpackAndOp<Type,Add<Type> ,BS,EQ>;
link->d_UnpackAndMult = UnpackAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAdd = FetchAndOp <Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
link->d_ScatterAndAdd = ScatterAndOp<Type,Add<Type> ,BS,EQ>;
link->d_ScatterAndMult = ScatterAndOp<Type,Mult<Type> ,BS,EQ>;
link->d_FetchAndAddLocal = FetchAndOpLocal<Type,Add<Type>,BS,EQ>;
link->da_UnpackAndInsert = UnpackAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_UnpackAndAdd = UnpackAndOp<Type,AtomicAdd<Type>,BS,EQ>;
link->da_UnpackAndMult = NULL; /* Not implemented yet */
link->da_FetchAndAdd = NULL; /* Return value of atomicAdd on complex is not atomic */
link->da_ScatterAndInsert = ScatterAndOp<Type,AtomicInsert<Type>,BS,EQ>;
link->da_ScatterAndAdd = ScatterAndOp<Type,AtomicAdd<Type>,BS,EQ>;
}
#endif
typedef signed char SignedChar;
typedef unsigned char UnsignedChar;
typedef struct {int a; int b; } PairInt;
typedef struct {PetscInt a; PetscInt b;} PairPetscInt;
template<typename Type>
static void PackInit_PairType(PetscSFLink link)
{
link->d_Pack = Pack<Type,1,1>;
link->d_UnpackAndInsert = Unpack<Type,1,1>;
link->d_UnpackAndMaxloc = UnpackAndOp<Type,Maxloc<Type>,1,1>;
link->d_UnpackAndMinloc = UnpackAndOp<Type,Minloc<Type>,1,1>;
link->d_ScatterAndInsert = ScatterAndOp<Type,Insert<Type>,1,1>;
link->d_ScatterAndMaxloc = ScatterAndOp<Type,Maxloc<Type>,1,1>;
link->d_ScatterAndMinloc = ScatterAndOp<Type,Minloc<Type>,1,1>;
/* Atomics for pair types are not implemented yet */
}
template<typename Type,PetscInt BS,PetscInt EQ>
static void PackInit_DumbType(PetscSFLink link)
{
link->d_Pack = Pack<Type,BS,EQ>;
link->d_UnpackAndInsert = Unpack<Type,BS,EQ>;
link->d_ScatterAndInsert = ScatterAndInsert<Type,BS,EQ>;
/* Atomics for dumb types are not implemented yet */
}
/* Some device-specific utilities */
static PetscErrorCode PetscSFLinkSyncDevice_CUDA(PetscSFLink link)
{
cudaError_t cerr;
PetscFunctionBegin;
cerr = cudaDeviceSynchronize();CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkSyncStream_CUDA(PetscSFLink link)
{
cudaError_t cerr;
PetscFunctionBegin;
cerr = cudaStreamSynchronize(link->stream);CHKERRCUDA(cerr);
PetscFunctionReturn(0);
}
static PetscErrorCode PetscSFLinkMemcpy_CUDA(PetscSFLink link,PetscMemType dstmtype,void* dst,PetscMemType srcmtype,const void*src,size_t n)
{
PetscFunctionBegin;
enum cudaMemcpyKind kinds[2][2] = {{cudaMemcpyHostToHost,cudaMemcpyHostToDevice},{cudaMemcpyDeviceToHost,cudaMemcpyDeviceToDevice}};
if (n) {
if (PetscMemTypeHost(dstmtype) && PetscMemTypeHost(srcmtype)) { /* Separate HostToHost so that pure-cpu code won't call cuda runtime */
PetscErrorCode ierr = PetscMemcpy(dst,src,n);CHKERRQ(ierr);
} else {
int stype = PetscMemTypeDevice(srcmtype) ? 1 : 0;
int dtype = PetscMemTypeDevice(dstmtype) ? 1 : 0;
cudaError_t cerr = cudaMemcpyAsync(dst,src,n,kinds[stype][dtype],link->stream);CHKERRCUDA(cerr);
}
}
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFMalloc_CUDA(PetscMemType mtype,size_t size,void** ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscMalloc(size,ptr);CHKERRQ(ierr);}
else if (PetscMemTypeDevice(mtype)) {
PetscErrorCode ierr = PetscDeviceInitialize(PETSC_DEVICE_CUDA);CHKERRQ(ierr);
cudaError_t err = cudaMalloc(ptr,size);CHKERRCUDA(err);
} else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d", (int)mtype);
PetscFunctionReturn(0);
}
PetscErrorCode PetscSFFree_CUDA(PetscMemType mtype,void* ptr)
{
PetscFunctionBegin;
if (PetscMemTypeHost(mtype)) {PetscErrorCode ierr = PetscFree(ptr);CHKERRQ(ierr);}
else if (PetscMemTypeDevice(mtype)) {cudaError_t err = cudaFree(ptr);CHKERRCUDA(err);}
else SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong PetscMemType %d",(int)mtype);
PetscFunctionReturn(0);
}
/* Destructor when the link uses MPI for communication on CUDA device */
static PetscErrorCode PetscSFLinkDestroy_MPI_CUDA(PetscSF sf,PetscSFLink link)
{
cudaError_t cerr;
PetscFunctionBegin;
for (int i=PETSCSF_LOCAL; i<=PETSCSF_REMOTE; i++) {
cerr = cudaFree(link->rootbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
cerr = cudaFree(link->leafbuf_alloc[i][PETSC_MEMTYPE_DEVICE]);CHKERRCUDA(cerr);
}
PetscFunctionReturn(0);
}
/* Some fields of link are initialized by PetscSFPackSetUp_Host. This routine only does what is needed on the device */
PetscErrorCode PetscSFLinkSetUp_CUDA(PetscSF sf,PetscSFLink link,MPI_Datatype unit)
{
PetscErrorCode ierr;
cudaError_t cerr;
PetscInt nSignedChar=0,nUnsignedChar=0,nInt=0,nPetscInt=0,nPetscReal=0;
PetscBool is2Int,is2PetscInt;
#if defined(PETSC_HAVE_COMPLEX)
PetscInt nPetscComplex=0;
#endif
PetscFunctionBegin;
if (link->deviceinited) PetscFunctionReturn(0);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_SIGNED_CHAR, &nSignedChar);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPI_UNSIGNED_CHAR,&nUnsignedChar);CHKERRQ(ierr);
/* MPI_CHAR is treated below as a dumb type that does not support reduction according to MPI standard */
ierr = MPIPetsc_Type_compare_contig(unit,MPI_INT, &nInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_INT, &nPetscInt);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_REAL,&nPetscReal);CHKERRQ(ierr);
#if defined(PETSC_HAVE_COMPLEX)
ierr = MPIPetsc_Type_compare_contig(unit,MPIU_COMPLEX,&nPetscComplex);CHKERRQ(ierr);
#endif
ierr = MPIPetsc_Type_compare(unit,MPI_2INT,&is2Int);CHKERRQ(ierr);
ierr = MPIPetsc_Type_compare(unit,MPIU_2INT,&is2PetscInt);CHKERRQ(ierr);
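  /* In the instantiations below the two integer template arguments appear to encode a data block size (BS,
     in basic units) and whether the unit's count equals it exactly (EQ=1) or is only a multiple of it (EQ=0),
     e.g. nPetscReal==8 -> <PetscReal,8,1> and nPetscReal%8==0 -> <PetscReal,8,0> */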
if (is2Int) {
PackInit_PairType<PairInt>(link);
} else if (is2PetscInt) { /* TODO: when is2PetscInt and nPetscInt=2, we don't know which path to take. The two paths support different ops. */
PackInit_PairType<PairPetscInt>(link);
} else if (nPetscReal) {
if (nPetscReal == 8) PackInit_RealType<PetscReal,8,1>(link); else if (nPetscReal%8 == 0) PackInit_RealType<PetscReal,8,0>(link);
else if (nPetscReal == 4) PackInit_RealType<PetscReal,4,1>(link); else if (nPetscReal%4 == 0) PackInit_RealType<PetscReal,4,0>(link);
else if (nPetscReal == 2) PackInit_RealType<PetscReal,2,1>(link); else if (nPetscReal%2 == 0) PackInit_RealType<PetscReal,2,0>(link);
else if (nPetscReal == 1) PackInit_RealType<PetscReal,1,1>(link); else if (nPetscReal%1 == 0) PackInit_RealType<PetscReal,1,0>(link);
} else if (nPetscInt && sizeof(PetscInt) == sizeof(llint)) {
if (nPetscInt == 8) PackInit_IntegerType<llint,8,1>(link); else if (nPetscInt%8 == 0) PackInit_IntegerType<llint,8,0>(link);
else if (nPetscInt == 4) PackInit_IntegerType<llint,4,1>(link); else if (nPetscInt%4 == 0) PackInit_IntegerType<llint,4,0>(link);
else if (nPetscInt == 2) PackInit_IntegerType<llint,2,1>(link); else if (nPetscInt%2 == 0) PackInit_IntegerType<llint,2,0>(link);
else if (nPetscInt == 1) PackInit_IntegerType<llint,1,1>(link); else if (nPetscInt%1 == 0) PackInit_IntegerType<llint,1,0>(link);
} else if (nInt) {
if (nInt == 8) PackInit_IntegerType<int,8,1>(link); else if (nInt%8 == 0) PackInit_IntegerType<int,8,0>(link);
else if (nInt == 4) PackInit_IntegerType<int,4,1>(link); else if (nInt%4 == 0) PackInit_IntegerType<int,4,0>(link);
else if (nInt == 2) PackInit_IntegerType<int,2,1>(link); else if (nInt%2 == 0) PackInit_IntegerType<int,2,0>(link);
else if (nInt == 1) PackInit_IntegerType<int,1,1>(link); else if (nInt%1 == 0) PackInit_IntegerType<int,1,0>(link);
} else if (nSignedChar) {
if (nSignedChar == 8) PackInit_IntegerType<SignedChar,8,1>(link); else if (nSignedChar%8 == 0) PackInit_IntegerType<SignedChar,8,0>(link);
else if (nSignedChar == 4) PackInit_IntegerType<SignedChar,4,1>(link); else if (nSignedChar%4 == 0) PackInit_IntegerType<SignedChar,4,0>(link);
else if (nSignedChar == 2) PackInit_IntegerType<SignedChar,2,1>(link); else if (nSignedChar%2 == 0) PackInit_IntegerType<SignedChar,2,0>(link);
else if (nSignedChar == 1) PackInit_IntegerType<SignedChar,1,1>(link); else if (nSignedChar%1 == 0) PackInit_IntegerType<SignedChar,1,0>(link);
} else if (nUnsignedChar) {
if (nUnsignedChar == 8) PackInit_IntegerType<UnsignedChar,8,1>(link); else if (nUnsignedChar%8 == 0) PackInit_IntegerType<UnsignedChar,8,0>(link);
else if (nUnsignedChar == 4) PackInit_IntegerType<UnsignedChar,4,1>(link); else if (nUnsignedChar%4 == 0) PackInit_IntegerType<UnsignedChar,4,0>(link);
else if (nUnsignedChar == 2) PackInit_IntegerType<UnsignedChar,2,1>(link); else if (nUnsignedChar%2 == 0) PackInit_IntegerType<UnsignedChar,2,0>(link);
else if (nUnsignedChar == 1) PackInit_IntegerType<UnsignedChar,1,1>(link); else if (nUnsignedChar%1 == 0) PackInit_IntegerType<UnsignedChar,1,0>(link);
#if defined(PETSC_HAVE_COMPLEX)
} else if (nPetscComplex) {
if (nPetscComplex == 8) PackInit_ComplexType<PetscComplex,8,1>(link); else if (nPetscComplex%8 == 0) PackInit_ComplexType<PetscComplex,8,0>(link);
else if (nPetscComplex == 4) PackInit_ComplexType<PetscComplex,4,1>(link); else if (nPetscComplex%4 == 0) PackInit_ComplexType<PetscComplex,4,0>(link);
else if (nPetscComplex == 2) PackInit_ComplexType<PetscComplex,2,1>(link); else if (nPetscComplex%2 == 0) PackInit_ComplexType<PetscComplex,2,0>(link);
else if (nPetscComplex == 1) PackInit_ComplexType<PetscComplex,1,1>(link); else if (nPetscComplex%1 == 0) PackInit_ComplexType<PetscComplex,1,0>(link);
#endif
} else {
MPI_Aint lb,nbyte;
ierr = MPI_Type_get_extent(unit,&lb,&nbyte);CHKERRMPI(ierr);
if (lb != 0) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_SUP,"Datatype with nonzero lower bound %ld\n",(long)lb);
    if (nbyte % sizeof(int)) { /* If the type size is not a multiple of sizeof(int) */
if (nbyte == 4) PackInit_DumbType<char,4,1>(link); else if (nbyte%4 == 0) PackInit_DumbType<char,4,0>(link);
else if (nbyte == 2) PackInit_DumbType<char,2,1>(link); else if (nbyte%2 == 0) PackInit_DumbType<char,2,0>(link);
else if (nbyte == 1) PackInit_DumbType<char,1,1>(link); else if (nbyte%1 == 0) PackInit_DumbType<char,1,0>(link);
} else {
nInt = nbyte / sizeof(int);
if (nInt == 8) PackInit_DumbType<int,8,1>(link); else if (nInt%8 == 0) PackInit_DumbType<int,8,0>(link);
else if (nInt == 4) PackInit_DumbType<int,4,1>(link); else if (nInt%4 == 0) PackInit_DumbType<int,4,0>(link);
else if (nInt == 2) PackInit_DumbType<int,2,1>(link); else if (nInt%2 == 0) PackInit_DumbType<int,2,0>(link);
else if (nInt == 1) PackInit_DumbType<int,1,1>(link); else if (nInt%1 == 0) PackInit_DumbType<int,1,0>(link);
}
}
if (!sf->maxResidentThreadsPerGPU) { /* Not initialized */
int device;
struct cudaDeviceProp props;
cerr = cudaGetDevice(&device);CHKERRCUDA(cerr);
cerr = cudaGetDeviceProperties(&props,device);CHKERRCUDA(cerr);
sf->maxResidentThreadsPerGPU = props.maxThreadsPerMultiProcessor*props.multiProcessorCount;
}
link->maxResidentThreadsPerGPU = sf->maxResidentThreadsPerGPU;
link->stream = PetscDefaultCudaStream;
link->Destroy = PetscSFLinkDestroy_MPI_CUDA;
link->SyncDevice = PetscSFLinkSyncDevice_CUDA;
link->SyncStream = PetscSFLinkSyncStream_CUDA;
link->Memcpy = PetscSFLinkMemcpy_CUDA;
link->deviceinited = PETSC_TRUE;
PetscFunctionReturn(0);
}
|
13ce240ee6087d6d9ebcb6fa9893574d95d326e7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <ATen/ATen.h>
#include <THH/THHAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
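// Grid-size helper: enough THREADS_PER_BLOCK-wide blocks to cover N, capped at 65000 blocks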
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__global__ void SplitRepscoreForward(const int nthreads,
const scalar_t *repscore_map,
const int *region_map,
const int length,
scalar_t *pric_table) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int py = index % length; // position along the length dimension
    int cluster_ind = index / length; // cluster index
if (region_map[py] == cluster_ind){
scalar_t *offset_pric_table = pric_table + cluster_ind * length + py;
atomicAdd(offset_pric_table, repscore_map[py]);
}
}
}
int SplitRepscoreForwardLaucher(const at::Tensor repscore_map,
const at::Tensor region_map,
const int cluster,
const int length,
at::Tensor pric_table) {
const int output_size = cluster * length;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
repscore_map.type(), "SplitRepscoreLaucherForward", ([&] {
const scalar_t *repscore_map_data = repscore_map.data<scalar_t>();
const int *region_map_data = region_map.data<int>();
scalar_t *pric_table_data = pric_table.data<scalar_t>();
hipLaunchKernelGGL(( SplitRepscoreForward<scalar_t>)
, dim3(GET_BLOCKS(output_size)), dim3(THREADS_PER_BLOCK), 0, 0,
output_size, repscore_map_data, region_map_data, length, pric_table_data);
}));
THCudaCheck(hipGetLastError());
return 1;
} | 13ce240ee6087d6d9ebcb6fa9893574d95d326e7.cu | #include <ATen/ATen.h>
#include <THC/THCAtomics.cuh>
#define CUDA_1D_KERNEL_LOOP(i, n) \
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
i += blockDim.x * gridDim.x)
#define THREADS_PER_BLOCK 1024
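// Grid-size helper: enough THREADS_PER_BLOCK-wide blocks to cover N, capped at 65000 blocks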
inline int GET_BLOCKS(const int N) {
int optimal_block_num = (N + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK;
int max_block_num = 65000;
return min(optimal_block_num, max_block_num);
}
template <typename scalar_t>
__global__ void SplitRepscoreForward(const int nthreads,
const scalar_t *repscore_map,
const int *region_map,
const int length,
scalar_t *pric_table) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
    int py = index % length; // position along the length dimension
    int cluster_ind = index / length; // cluster index
if (region_map[py] == cluster_ind){
scalar_t *offset_pric_table = pric_table + cluster_ind * length + py;
atomicAdd(offset_pric_table, repscore_map[py]);
}
}
}
int SplitRepscoreForwardLaucher(const at::Tensor repscore_map,
const at::Tensor region_map,
const int cluster,
const int length,
at::Tensor pric_table) {
const int output_size = cluster * length;
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
repscore_map.type(), "SplitRepscoreLaucherForward", ([&] {
const scalar_t *repscore_map_data = repscore_map.data<scalar_t>();
const int *region_map_data = region_map.data<int>();
scalar_t *pric_table_data = pric_table.data<scalar_t>();
SplitRepscoreForward<scalar_t>
<<<GET_BLOCKS(output_size), THREADS_PER_BLOCK>>>(
output_size, repscore_map_data, region_map_data, length, pric_table_data);
}));
THCudaCheck(cudaGetLastError());
return 1;
} |
cf4b6873a528912c63aba1a0884ccd4c76b03a85.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel5_plus_4_b [3][2];
static int dims_update_halo_kernel5_plus_4_b_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel5_plus_4_b_gpu(ACC<double> &vol_flux_z,
ACC<double> &mass_flux_z,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = vol_flux_z(0,-4,0);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = mass_flux_z(0,-4,0);
}
__global__ void ops_update_halo_kernel5_plus_4_b(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_4_b[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_4_b[0][0] * dims_update_halo_kernel5_plus_4_b[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_4_b[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_4_b[1][0] * dims_update_halo_kernel5_plus_4_b[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel5_plus_4_b[0][0], dims_update_halo_kernel5_plus_4_b[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel5_plus_4_b[1][0], dims_update_halo_kernel5_plus_4_b[1][1], arg1);
update_halo_kernel5_plus_4_b_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_b_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,86)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(86,"update_halo_kernel5_plus_4_b");
OPS_kernels[86].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel5_plus_4_b_h[0][0] || ydim0 != dims_update_halo_kernel5_plus_4_b_h[0][1] || xdim1 != dims_update_halo_kernel5_plus_4_b_h[1][0] || ydim1 != dims_update_halo_kernel5_plus_4_b_h[1][1]) {
dims_update_halo_kernel5_plus_4_b_h[0][0] = xdim0;
dims_update_halo_kernel5_plus_4_b_h[0][1] = ydim0;
dims_update_halo_kernel5_plus_4_b_h[1][0] = xdim1;
dims_update_halo_kernel5_plus_4_b_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel5_plus_4_b, dims_update_halo_kernel5_plus_4_b_h, sizeof(dims_update_halo_kernel5_plus_4_b)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
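  //stage the NUM_FIELDS flag array (arg2) in the OPS constant buffer and copy it to the device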
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[86].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_b), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[86].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[86].mpi_time += t2-t1;
OPS_kernels[86].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[86].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 86;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 86;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_b_execute;
if (OPS_diags > 1) {
ops_timing_realloc(86,"update_halo_kernel5_plus_4_b");
}
ops_enqueue_kernel(desc);
}
#endif
| cf4b6873a528912c63aba1a0884ccd4c76b03a85.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel5_plus_4_b [3][2];
static int dims_update_halo_kernel5_plus_4_b_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel5_plus_4_b_gpu(ACC<double> &vol_flux_z,
ACC<double> &mass_flux_z,
const int* fields) {
if(fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z(0,0,0) = vol_flux_z(0,-4,0);
if(fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z(0,0,0) = mass_flux_z(0,-4,0);
}
__global__ void ops_update_halo_kernel5_plus_4_b(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_4_b[0][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_4_b[0][0] * dims_update_halo_kernel5_plus_4_b[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel5_plus_4_b[1][0] + idx_z * 1*1 * dims_update_halo_kernel5_plus_4_b[1][0] * dims_update_halo_kernel5_plus_4_b[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel5_plus_4_b[0][0], dims_update_halo_kernel5_plus_4_b[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel5_plus_4_b[1][0], dims_update_halo_kernel5_plus_4_b[1][1], arg1);
update_halo_kernel5_plus_4_b_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel5_plus_4_b_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,86)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(86,"update_halo_kernel5_plus_4_b");
OPS_kernels[86].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel5_plus_4_b_h[0][0] || ydim0 != dims_update_halo_kernel5_plus_4_b_h[0][1] || xdim1 != dims_update_halo_kernel5_plus_4_b_h[1][0] || ydim1 != dims_update_halo_kernel5_plus_4_b_h[1][1]) {
dims_update_halo_kernel5_plus_4_b_h[0][0] = xdim0;
dims_update_halo_kernel5_plus_4_b_h[0][1] = ydim0;
dims_update_halo_kernel5_plus_4_b_h[1][0] = xdim1;
dims_update_halo_kernel5_plus_4_b_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel5_plus_4_b, dims_update_halo_kernel5_plus_4_b_h, sizeof(dims_update_halo_kernel5_plus_4_b)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
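  //stage the NUM_FIELDS flag array (arg2) in the OPS constant buffer and copy it to the device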
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[86].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel5_plus_4_b<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[86].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[86].mpi_time += t2-t1;
OPS_kernels[86].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[86].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel5_plus_4_b(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 86;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 86;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel5_plus_4_b_execute;
if (OPS_diags > 1) {
ops_timing_realloc(86,"update_halo_kernel5_plus_4_b");
}
ops_enqueue_kernel(desc);
}
#endif
|
103a95d634c82f8114e98350a300aff4202e8223.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
const int TILE_DIM = 16;
float p_val = 0;
int row = blockIdx.y* TILE_DIM + threadIdx.y;
int col = blockIdx.x* TILE_DIM + threadIdx.x;
__shared__ float mt [TILE_DIM+1][TILE_DIM];
__shared__ float nt [TILE_DIM+1][TILE_DIM];
for (int k = 0 ; k < (TILE_DIM + M.width -1)/ TILE_DIM; k++){
    // guard against reading past the matrix edge: threads whose tile element
    // falls outside the matrix (grid overshoot) load zero instead
if ( k* TILE_DIM + threadIdx.x < M.width && row < M.height)
mt[threadIdx.y][threadIdx.x] = M.elements[row*M.width + k * TILE_DIM + threadIdx.x];
else
mt[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < N.height && col < N.width)
nt[threadIdx.y][threadIdx.x] = N.elements[ (k*TILE_DIM + threadIdx.y) * N.width + col];
else
nt[threadIdx.y][threadIdx.x] = 0.0;
//after everything is inserted into tile
__syncthreads();
// calculate the p_val
for (int n = 0; n < TILE_DIM; ++n)
p_val += mt[threadIdx.y][n] * nt[n][threadIdx.x];
__syncthreads();
}
//update the P matrix with the values
if ( row < P.height && col < P.width)
P.elements[row*P.width+col] = p_val;
//P.elements[((blockIdx.y*blockDim.y+threadIdx.y)*P.width)+ (blockIdx.x*blockDim.x) + threadIdx.x] = p_val;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
| 103a95d634c82f8114e98350a300aff4202e8223.cu | /*
* Copyright 1993-2006 NVIDIA Corporation. All rights reserved.
*
* NOTICE TO USER:
*
* This source code is subject to NVIDIA ownership rights under U.S. and
* international Copyright laws.
*
* This software and the information contained herein is PROPRIETARY and
* CONFIDENTIAL to NVIDIA and is being provided under the terms and
* conditions of a Non-Disclosure Agreement. Any reproduction or
* disclosure to any third party without the express written consent of
* NVIDIA is prohibited.
*
* NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE
* CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR
* IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
* IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL,
* OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
* OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
* OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
* OR PERFORMANCE OF THIS SOURCE CODE.
*
* U.S. Government End Users. This source code is a "commercial item" as
* that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of
* "commercial computer software" and "commercial computer software
* documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995)
* and is provided to the U.S. Government only as a commercial end item.
* Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through
* 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the
* source code with only those rights set forth herein.
*/
/* Matrix multiplication: C = A * B.
* Device code.
*/
#ifndef _MATRIXMUL_KERNEL_H_
#define _MATRIXMUL_KERNEL_H_
#include <stdio.h>
#include "matrixmul.h"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
// Matrix multiplication kernel thread specification
__global__ void MatrixMulKernel(Matrix M, Matrix N, Matrix P)
{
const int TILE_DIM = 16;
float p_val = 0;
int row = blockIdx.y* TILE_DIM + threadIdx.y;
int col = blockIdx.x* TILE_DIM + threadIdx.x;
__shared__ float mt [TILE_DIM+1][TILE_DIM];
__shared__ float nt [TILE_DIM+1][TILE_DIM];
for (int k = 0 ; k < (TILE_DIM + M.width -1)/ TILE_DIM; k++){
    // guard against reading past the matrix edge: threads whose tile element
    // falls outside the matrix (grid overshoot) load zero instead
if ( k* TILE_DIM + threadIdx.x < M.width && row < M.height)
mt[threadIdx.y][threadIdx.x] = M.elements[row*M.width + k * TILE_DIM + threadIdx.x];
else
mt[threadIdx.y][threadIdx.x] = 0.0;
if (k*TILE_DIM + threadIdx.y < N.height && col < N.width)
nt[threadIdx.y][threadIdx.x] = N.elements[ (k*TILE_DIM + threadIdx.y) * N.width + col];
else
nt[threadIdx.y][threadIdx.x] = 0.0;
//after everything is inserted into tile
__syncthreads();
// calculate the p_val
for (int n = 0; n < TILE_DIM; ++n)
p_val += mt[threadIdx.y][n] * nt[n][threadIdx.x];
__syncthreads();
}
//update the P matrix with the values
if ( row < P.height && col < P.width)
P.elements[row*P.width+col] = p_val;
//P.elements[((blockIdx.y*blockDim.y+threadIdx.y)*P.width)+ (blockIdx.x*blockDim.x) + threadIdx.x] = p_val;
}
#endif // #ifndef _MATRIXMUL_KERNEL_H_
|
d58f32a966a23f2863cb0bb79e37d9f000c0b928.hip | // !!! This is a file automatically generated by hipify!!!
// This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n)
{
// compute the global element index this thread should process
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
void vector_add_serial(const float *a,
const float *b,
float *c,
const size_t n)
{
for (size_t i = 0; i < n; i++) {
c[i] = a[i] + b[i];
}
}
int main(void)
{
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *serial_a = 0;
float *serial_b = 0;
float *serial_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
serial_a = (float*)malloc(num_bytes);
serial_b = (float*)malloc(num_bytes);
serial_c = (float*)malloc(num_bytes);
// hipMalloc the device arrays
hipMalloc((void**)&device_array_a, num_bytes);
hipMalloc((void**)&device_array_b, num_bytes);
hipMalloc((void**)&device_array_c, num_bytes);
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_array_b == 0 || host_array_c == 0 ||
device_array_a == 0 || device_array_b == 0 || device_array_c == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
serial_a[i] = host_array_a[i];
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
serial_b[i] = host_array_b[i];
}
// copy arrays a & b to the device memory space
hipMemcpy(device_array_a, host_array_a, num_bytes, hipMemcpyHostToDevice);
hipMemcpy(device_array_b, host_array_b, num_bytes, hipMemcpyHostToDevice);
// compute c = a + b on the device
const size_t block_size = 256;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// time the kernel launches using CUDA events
hipEvent_t launch_begin, launch_end;
hipEventCreate(&launch_begin);
hipEventCreate(&launch_end);
// record a CUDA event immediately before and after the kernel launch
hipEventRecord(launch_begin,0);
// launch the kernel
hipLaunchKernelGGL(( vector_add), dim3(grid_size), dim3(block_size), 0, 0, device_array_a, device_array_b, device_array_c, num_elements);
hipEventRecord(launch_end,0);
hipEventSynchronize(launch_end);
// measure the time (ms) spent in the kernel
float time = 0;
hipEventElapsedTime(&time, launch_begin, launch_end);
// copy the result back to the host memory space
hipMemcpy(host_array_c, device_array_c, num_bytes, hipMemcpyDeviceToHost);
printf("Kernel run time: %fms\n", time);
clock_t begin = clock();
vector_add_serial(serial_a, serial_b, serial_c, num_elements);
clock_t end = clock();
double elapsedTime = (double)(end-begin)/CLOCKS_PER_SEC*1000;
printf("Serial run time: %fms\n", elapsedTime);
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
free(serial_a);
free(serial_b);
free(serial_c);
hipFree(device_array_a);
hipFree(device_array_b);
hipFree(device_array_c);
}
| d58f32a966a23f2863cb0bb79e37d9f000c0b928.cu | // This example demonstrates parallel floating point vector
// addition with a simple __global__ function.
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <cuda_runtime.h>
// this kernel computes the vector sum c = a + b
// each thread performs one pair-wise addition
__global__ void vector_add(const float *a,
const float *b,
float *c,
const size_t n)
{
// compute the global element index this thread should process
unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
// avoid accessing out of bounds elements
if(i < n)
{
// sum elements
c[i] = a[i] + b[i];
}
}
void vector_add_serial(const float *a,
const float *b,
float *c,
const size_t n)
{
for (size_t i = 0; i < n; i++) {
c[i] = a[i] + b[i];
}
}
int main(void)
{
// create arrays of 1M elements
const int num_elements = 1<<20;
// compute the size of the arrays in bytes
const int num_bytes = num_elements * sizeof(float);
// points to host & device arrays
float *device_array_a = 0;
float *device_array_b = 0;
float *device_array_c = 0;
float *serial_a = 0;
float *serial_b = 0;
float *serial_c = 0;
float *host_array_a = 0;
float *host_array_b = 0;
float *host_array_c = 0;
// malloc the host arrays
host_array_a = (float*)malloc(num_bytes);
host_array_b = (float*)malloc(num_bytes);
host_array_c = (float*)malloc(num_bytes);
serial_a = (float*)malloc(num_bytes);
serial_b = (float*)malloc(num_bytes);
serial_c = (float*)malloc(num_bytes);
// cudaMalloc the device arrays
cudaMalloc((void**)&device_array_a, num_bytes);
cudaMalloc((void**)&device_array_b, num_bytes);
cudaMalloc((void**)&device_array_c, num_bytes);
// if any memory allocation failed, report an error message
if(host_array_a == 0 || host_array_b == 0 || host_array_c == 0 ||
device_array_a == 0 || device_array_b == 0 || device_array_c == 0)
{
printf("couldn't allocate memory\n");
return 1;
}
// initialize host_array_a & host_array_b
for(int i = 0; i < num_elements; ++i)
{
// make array a a linear ramp
host_array_a[i] = (float)i;
serial_a[i] = host_array_a[i];
// make array b random
host_array_b[i] = (float)rand() / RAND_MAX;
serial_b[i] = host_array_b[i];
}
// copy arrays a & b to the device memory space
cudaMemcpy(device_array_a, host_array_a, num_bytes, cudaMemcpyHostToDevice);
cudaMemcpy(device_array_b, host_array_b, num_bytes, cudaMemcpyHostToDevice);
// compute c = a + b on the device
const size_t block_size = 256;
size_t grid_size = num_elements / block_size;
// deal with a possible partial final block
if(num_elements % block_size) ++grid_size;
// time the kernel launches using CUDA events
cudaEvent_t launch_begin, launch_end;
cudaEventCreate(&launch_begin);
cudaEventCreate(&launch_end);
// record a CUDA event immediately before and after the kernel launch
cudaEventRecord(launch_begin,0);
// launch the kernel
vector_add<<<grid_size, block_size>>>(device_array_a, device_array_b, device_array_c, num_elements);
cudaEventRecord(launch_end,0);
cudaEventSynchronize(launch_end);
// measure the time (ms) spent in the kernel
float time = 0;
cudaEventElapsedTime(&time, launch_begin, launch_end);
// copy the result back to the host memory space
cudaMemcpy(host_array_c, device_array_c, num_bytes, cudaMemcpyDeviceToHost);
printf("Kernel run time: %fms\n", time);
clock_t begin = clock();
vector_add_serial(serial_a, serial_b, serial_c, num_elements);
clock_t end = clock();
double elapsedTime = (double)(end-begin)/CLOCKS_PER_SEC*1000;
printf("Serial run time: %fms\n", elapsedTime);
// deallocate memory
free(host_array_a);
free(host_array_b);
free(host_array_c);
free(serial_a);
free(serial_b);
free(serial_c);
cudaFree(device_array_a);
cudaFree(device_array_b);
cudaFree(device_array_c);
}
|
c8f406d93b629d340c777fa1a684447aa969f66c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmult!"
{
}
__global__ void diag_kernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i]=a[i+i*lengthA];
}
} | c8f406d93b629d340c777fa1a684447aa969f66c.cu | #include "includes.h"
// filename: vmult!.cu
// a simple CUDA kernel to element multiply two vectors C=alpha*A.*B
extern "C" // ensure function name to be exactly "vmult!"
{
}
__global__ void diag_kernel(const int lengthA, const double *a, double *b)
{
int i = threadIdx.x + blockIdx.x * blockDim.x;
if (i<lengthA)
{
b[i]=a[i+i*lengthA];
}
} |
fe57174cc14f3072cb164e99e85675733dbd5aaa.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "OneHotKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *params = NULL;
hipMalloc(¶ms, XSIZE*YSIZE);
int64_t num_features = 1;
int embed_size = XSIZE*YSIZE;
int batch_size = XSIZE*YSIZE;
const int64_t *indices = NULL;
hipMalloc(&indices, XSIZE*YSIZE);
float *ret = NULL;
hipMalloc(&ret, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
OneHotKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, params,num_features,embed_size,batch_size,indices,ret);
hipDeviceSynchronize();
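// 10 warm-up launches, then 1000 timed launches measured with std::chrono steady_clock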
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
OneHotKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, params,num_features,embed_size,batch_size,indices,ret);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
OneHotKernel), dim3(gridBlock),dim3(threadBlock), 0, 0, params,num_features,embed_size,batch_size,indices,ret);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | fe57174cc14f3072cb164e99e85675733dbd5aaa.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "OneHotKernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
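// Benchmark harness: for each matrix size above, sweep the 20 block configurations, warm up with 10 launches, then time 1000 launches of OneHotKernel.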
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const float *params = NULL;
cudaMalloc(&params, XSIZE*YSIZE);
int64_t num_features = 1;
int embed_size = XSIZE*YSIZE;
int batch_size = XSIZE*YSIZE;
const int64_t *indices = NULL;
cudaMalloc(&indices, XSIZE*YSIZE);
float *ret = NULL;
cudaMalloc(&ret, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
OneHotKernel<<<gridBlock,threadBlock>>>(params,num_features,embed_size,batch_size,indices,ret);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
OneHotKernel<<<gridBlock,threadBlock>>>(params,num_features,embed_size,batch_size,indices,ret);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
OneHotKernel<<<gridBlock,threadBlock>>>(params,num_features,embed_size,batch_size,indices,ret);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
09ecde1c8cf7cc9a042ba4c33718ae50d6af6d7e.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "scale_mask_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE);
float mask_num = 1;
float *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
scale_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,mask_num,mask,scale);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
scale_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,mask_num,mask,scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
scale_mask_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, n,x,mask_num,mask,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 09ecde1c8cf7cc9a042ba4c33718ae50d6af6d7e.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "scale_mask_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE);
float mask_num = 1;
float *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE);
float scale = 2;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
scale_mask_kernel<<<gridBlock,threadBlock>>>(n,x,mask_num,mask,scale);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
scale_mask_kernel<<<gridBlock,threadBlock>>>(n,x,mask_num,mask,scale);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
scale_mask_kernel<<<gridBlock,threadBlock>>>(n,x,mask_num,mask,scale);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
a65967da3df93a64989d6b7afa09a3b5bac4016c.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
*************************************************************************
unsigned int width = gridDim.x * blockDim.x;
unsigned int height = gridDim.y * blockDim.y;
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int kn = y * width + x;
*************************************************************************
*/
#include <stdio.h>
#include <GPU.h>
#include <App.h>
//These variables can only be global!!!!
texture<float, 2> mTexRef;
hipChannelFormatDesc mChannelDesc = hipCreateChannelDesc<float>();
hipArray *mCUMatrizA = NULL;
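// The kernel below uses only the y thread index: each thread computes one row n of B = A*x as B[n] = sum_m tex2D(A, m, n) * x[m], reading the matrix from the 2D texture.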
__global__
void kernel (float *vetorB,
float *vetorA,
const int colunas,
const int linhas)
{
//unsigned int m = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int n = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int m = 0;
// float4 fx = tex2D(mTexRef, x, y );
//unsigned int n = blockDim.y * blockIdx.y + threadIdx.y;
//unsigned int mn = n * width + m;
vetorB[n] = 0.0f ; //tex2D(mTexRef, 6, 1 ) ;
for (m = 0; m < colunas; m++)
vetorB[n] += tex2D(mTexRef, m, n ) * vetorA[m];
}
//----------------------------------------------------------------------------------------------------
extern "C" void multiplicaMatrizVetor(float *vetorB,
float *MatrizA,
float *vetorA,
int colunas,
int linhas)
{
dim3 dGrid,
dThreads;
unsigned int uMemMatrizA = sizeof(float) * colunas * linhas,
uMemVetorA = sizeof(float) * colunas,
uMemVetorB = sizeof(float) * linhas;
float *fGPUVetorB = NULL,
*fGPUVetorA = NULL;
Stopwatch sMemoria,
sGPU;
dGrid.x = 1; //BLOCK_SIZE;
dGrid.y = BLOCK_SIZE;
dGrid.z = 1;
dThreads.x = 1; //colunas / BLOCK_SIZE;
dThreads.y = linhas / BLOCK_SIZE;
dThreads.z = 1;
FREQUENCY(sMemoria);
FREQUENCY(sGPU);
START_STOPWATCH(sMemoria);
//Allocate memory on the GPU
CHECK_ERROR(hipMallocArray(&mCUMatrizA, &mChannelDesc, colunas, linhas));
CHECK_ERROR(hipMalloc((void**) &fGPUVetorA, uMemVetorA));
CHECK_ERROR(hipMalloc((void**) &fGPUVetorB, uMemVetorB));
//Copying data CPU --> GPU
CHECK_ERROR(hipMemcpyToArray(mCUMatrizA, 0, 0, MatrizA, uMemMatrizA, hipMemcpyHostToDevice));
CHECK_ERROR(hipMemcpy( fGPUVetorA, vetorA, uMemVetorA, hipMemcpyHostToDevice));
CHECK_ERROR(hipBindTextureToArray(mTexRef, mCUMatrizA)); //Bind the texture
START_STOPWATCH(sGPU)
hipLaunchKernelGGL(( kernel), dim3(dGrid), dim3(dThreads), 0, 0,
fGPUVetorB, fGPUVetorA, colunas, linhas);
CHECK_ERROR(hipDeviceSynchronize());
STOP_STOPWATCH(sGPU);
CHECK_ERROR(hipUnbindTexture(mTexRef)); //Unbind the texture
CHECK_ERROR(hipMemcpy(vetorB, fGPUVetorB, uMemVetorB, hipMemcpyDeviceToHost));
//Free the GPU memory
CHECK_ERROR(hipFreeArray(mCUMatrizA));
CHECK_ERROR(hipFree(fGPUVetorA));
CHECK_ERROR(hipFree(fGPUVetorB));
STOP_STOPWATCH(sMemoria);
sMemoria.mElapsedTime -= sGPU.mElapsedTime;
fprintf(stdout, "\n");
fprintf(stdout, "\nTotal de memoria alocada na GPU: %u bytes", uMemMatrizA + uMemVetorA + uMemVetorB);
fprintf(stdout, "\n Tempo gasto no processamento: %.4lf (ms) ", sGPU.mElapsedTime);
fprintf(stdout, "\nTempo gasto com alocacao / copia de mem.: %.4lf (ms) ", sMemoria.mElapsedTime);
fprintf(stdout, "\n Total de tempo gasto: %.4lf (ms) ", sMemoria.mElapsedTime + sGPU.mElapsedTime);
fprintf(stdout, "\n");
}
| a65967da3df93a64989d6b7afa09a3b5bac4016c.cu | /*
*************************************************************************
unsigned int width = gridDim.x * blockDim.x;
unsigned int height = gridDim.y * blockDim.y;
unsigned int x = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int y = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int kn = y * width + x;
*************************************************************************
*/
#include <stdio.h>
#include <GPU.h>
#include <App.h>
//These variables can only be global!!!!
texture<float, 2> mTexRef;
cudaChannelFormatDesc mChannelDesc = cudaCreateChannelDesc<float>();
cudaArray *mCUMatrizA = NULL;
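// The kernel below uses only the y thread index: each thread computes one row n of B = A*x as B[n] = sum_m tex2D(A, m, n) * x[m], reading the matrix from the 2D texture.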
__global__
void kernel (float *vetorB,
float *vetorA,
const int colunas,
const int linhas)
{
//unsigned int m = blockDim.x * blockIdx.x + threadIdx.x;
unsigned int n = blockDim.y * blockIdx.y + threadIdx.y;
unsigned int m = 0;
// float4 fx = tex2D(mTexRef, x, y );
//unsigned int n = blockDim.y * blockIdx.y + threadIdx.y;
//unsigned int mn = n * width + m;
vetorB[n] = 0.0f ; //tex2D(mTexRef, 6, 1 ) ;
for (m = 0; m < colunas; m++)
vetorB[n] += tex2D(mTexRef, m, n ) * vetorA[m];
}
//----------------------------------------------------------------------------------------------------
extern "C" void multiplicaMatrizVetor(float *vetorB,
float *MatrizA,
float *vetorA,
int colunas,
int linhas)
{
dim3 dGrid,
dThreads;
unsigned int uMemMatrizA = sizeof(float) * colunas * linhas,
uMemVetorA = sizeof(float) * colunas,
uMemVetorB = sizeof(float) * linhas;
float *fGPUVetorB = NULL,
*fGPUVetorA = NULL;
Stopwatch sMemoria,
sGPU;
dGrid.x = 1; //BLOCK_SIZE;
dGrid.y = BLOCK_SIZE;
dGrid.z = 1;
dThreads.x = 1; //colunas / BLOCK_SIZE;
dThreads.y = linhas / BLOCK_SIZE;
dThreads.z = 1;
FREQUENCY(sMemoria);
FREQUENCY(sGPU);
START_STOPWATCH(sMemoria);
//Allocate memory on the GPU
CHECK_ERROR(cudaMallocArray(&mCUMatrizA, &mChannelDesc, colunas, linhas));
CHECK_ERROR(cudaMalloc((void**) &fGPUVetorA, uMemVetorA));
CHECK_ERROR(cudaMalloc((void**) &fGPUVetorB, uMemVetorB));
//Copying data CPU --> GPU
CHECK_ERROR(cudaMemcpyToArray(mCUMatrizA, 0, 0, MatrizA, uMemMatrizA, cudaMemcpyHostToDevice));
CHECK_ERROR(cudaMemcpy( fGPUVetorA, vetorA, uMemVetorA, cudaMemcpyHostToDevice));
CHECK_ERROR(cudaBindTextureToArray(mTexRef, mCUMatrizA)); //Bind the texture
START_STOPWATCH(sGPU)
kernel<<<dGrid, dThreads>>>
(fGPUVetorB, fGPUVetorA, colunas, linhas);
CHECK_ERROR(cudaThreadSynchronize());
STOP_STOPWATCH(sGPU);
CHECK_ERROR(cudaUnbindTexture(mTexRef)); //Unbind the texture
CHECK_ERROR(cudaMemcpy(vetorB, fGPUVetorB, uMemVetorB, cudaMemcpyDeviceToHost));
//Free the GPU memory
CHECK_ERROR(cudaFreeArray(mCUMatrizA));
CHECK_ERROR(cudaFree(fGPUVetorA));
CHECK_ERROR(cudaFree(fGPUVetorB));
STOP_STOPWATCH(sMemoria);
sMemoria.mElapsedTime -= sGPU.mElapsedTime;
fprintf(stdout, "\n");
fprintf(stdout, "\nTotal de memoria alocada na GPU: %u bytes", uMemMatrizA + uMemVetorA + uMemVetorB);
fprintf(stdout, "\n Tempo gasto no processamento: %.4lf (ms) ", sGPU.mElapsedTime);
fprintf(stdout, "\nTempo gasto com alocacao / copia de mem.: %.4lf (ms) ", sMemoria.mElapsedTime);
fprintf(stdout, "\n Total de tempo gasto: %.4lf (ms) ", sMemoria.mElapsedTime + sGPU.mElapsedTime);
fprintf(stdout, "\n");
}
|
08d550b2fbdbae15a92948bf57b7db5ddbb62187.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
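// Each block handles one proposal and each thread one ground-truth instance: count the proposal's points labelled with that instance and store IoU = intersection / (proposal_total + instance_total - intersection + 1e-5).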
__global__ void get_iou_cuda_(int nInstance, int nProposal, int *proposals_idx, int *proposals_offset, long *instance_labels, int *instance_pointnum, float *proposals_iou){
for(int proposal_id = blockIdx.x; proposal_id < nProposal; proposal_id += gridDim.x){
int start = proposals_offset[proposal_id];
int end = proposals_offset[proposal_id + 1];
int proposal_total = end - start;
for(int instance_id = threadIdx.x; instance_id < nInstance; instance_id += blockDim.x){
int instance_total = instance_pointnum[instance_id];
int intersection = 0;
for(int i = start; i < end; i++){
int idx = proposals_idx[i];
if((int)instance_labels[idx] == instance_id){
intersection += 1;
}
}
proposals_iou[proposal_id * nInstance + instance_id] = (float)intersection / ((float)(proposal_total + instance_total - intersection) + 1e-5);
}
}
} | 08d550b2fbdbae15a92948bf57b7db5ddbb62187.cu | #include "includes.h"
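// Each block handles one proposal and each thread one ground-truth instance: count the proposal's points labelled with that instance and store IoU = intersection / (proposal_total + instance_total - intersection + 1e-5).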
__global__ void get_iou_cuda_(int nInstance, int nProposal, int *proposals_idx, int *proposals_offset, long *instance_labels, int *instance_pointnum, float *proposals_iou){
for(int proposal_id = blockIdx.x; proposal_id < nProposal; proposal_id += gridDim.x){
int start = proposals_offset[proposal_id];
int end = proposals_offset[proposal_id + 1];
int proposal_total = end - start;
for(int instance_id = threadIdx.x; instance_id < nInstance; instance_id += blockDim.x){
int instance_total = instance_pointnum[instance_id];
int intersection = 0;
for(int i = start; i < end; i++){
int idx = proposals_idx[i];
if((int)instance_labels[idx] == instance_id){
intersection += 1;
}
}
proposals_iou[proposal_id * nInstance + instance_id] = (float)intersection / ((float)(proposal_total + instance_total - intersection) + 1e-5);
}
}
} |
ef5fad6678aec9071fa5a1900fc0e6925ee7670a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include "wave.h"
__global__
void calculate(int width, int frames, unsigned char* pic)
{
//Thread index within the block
int index = threadIdx.x;
//This variable contains the block dimension (blockDim.x), used as the loop stride.
int offset = blockDim.x;
for (int frame = index; frame < frames; frame += offset) {
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
}
int main(int argc, char *argv[])
{
// check command line
if (argc != 3) {
std::cout << "usage: " << argv[0] << " frame_width num_frames" << std::endl;
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
std::cout << "error: frame_width must be at least 100\n" << std::endl;
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
std::cout << "error: num_frames must be at least 1\n" << std::endl;
exit(-1);
}
// std::cout << "computing " << std::cout << frames << std::cout << " of " std::cout << " picture " << std::cout << width << std::cout << " picture " << std::endl;
printf("computing %d frames of %d by %d picture\n", frames, width, width);
unsigned char* pic;
//Allocating Unified Memory is as simple as replacing calls to malloc() or new with calls to hipMallocManaged(), an allocation function that returns a pointer accessible from any processor (ptr in the following).
hipMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start time
timeval start, end;
gettimeofday(&start, NULL);
// calculate threads for frames
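// Note: the kernel below runs as a single block of `frames` threads, so num_frames must not exceed the hardware limit of 1024 threads per block.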
hipLaunchKernelGGL(( calculate), dim3(1), dim3(frames), 0, 0, width, frames, pic);
// hipDeviceSynchronize() will force the program to ensure the stream(s)'s kernels/memcpys are complete before continuing
hipDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
//std::cout << "compute time: " << runtime << std::endl;
// verify result by writing frames to BMP files
if ((width <= 256) && (frames <= 100)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "wave%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
hipFree(pic);
return 0;
}
| ef5fad6678aec9071fa5a1900fc0e6925ee7670a.cu | #include <iostream>
#include <sys/time.h>
#include <stdio.h>
#include <math.h>
#include "wave.h"
__global__
void calculate(int width, int frames, unsigned char* pic)
{
//Thread index within the block
int index = threadIdx.x;
//This variable contains the block dimension (blockDim.x), used as the loop stride.
int offset = blockDim.x;
for (int frame = index; frame < frames; frame += offset) {
for (int row = 0; row < width; row++) {
for (int col = 0; col < width; col++) {
float fx = col - 1024/2;
float fy = row - 1024/2;
float d = sqrtf( fx * fx + fy * fy );
unsigned char color = (unsigned char) (160.0f + 127.0f *
cos(d/10.0f - frame/7.0f) /
(d/50.0f + 1.0f));
pic[frame * width * width + row * width + col] = (unsigned char) color;
}
}
}
}
int main(int argc, char *argv[])
{
// check command line
if (argc != 3) {
std::cout << "usage: " << argv[0] << " frame_width num_frames" << std::endl;
exit(-1);
}
int width = atoi(argv[1]);
if (width < 100) {
std::cout << "error: frame_width must be at least 100\n" << std::endl;
exit(-1);
}
int frames = atoi(argv[2]);
if (frames < 1) {
std::cout << "error: num_frames must be at least 1\n" << std::endl;
exit(-1);
}
// std::cout << "computing " << std::cout << frames << std::cout << " of " std::cout << " picture " << std::cout << width << std::cout << " picture " << std::endl;
printf("computing %d frames of %d by %d picture\n", frames, width, width);
unsigned char* pic;
//Allocating Unified Memory is as simple as replacing calls to malloc() or new with calls to cudaMallocManaged(), an allocation function that returns a pointer accessible from any processor (ptr in the following).
cudaMallocManaged(&pic, frames*width*width*sizeof(unsigned char));
// start time
timeval start, end;
gettimeofday(&start, NULL);
// calculate threads for frames
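// Note: the kernel below runs as a single block of `frames` threads, so num_frames must not exceed the hardware limit of 1024 threads per block.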
calculate<<<1, frames>>>(width, frames, pic);
// cudaDeviceSynchronize() will force the program to ensure the stream(s)'s kernels/memcpys are complete before continuing
cudaDeviceSynchronize();
// end time
gettimeofday(&end, NULL);
double runtime = end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0;
printf("compute time: %.4f s\n", runtime);
//std::cout << "compute time: " << runtime << std::endl;
// verify result by writing frames to BMP files
if ((width <= 256) && (frames <= 100)) {
for (int frame = 0; frame < frames; frame++) {
char name[32];
sprintf(name, "wave%d.bmp", frame + 1000);
writeBMP(width, width, &pic[frame * width * width], name);
}
}
cudaFree(pic);
return 0;
}
|
1bba369de58023b988112ddbd5b5c70052195566.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
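// cuBLAS assumes column-major storage, so the row-major product C = A*B is computed as the column-major product C^T = B^T * A^T, which is why B (and N) are passed before A (and M).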
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
hipblasOperation_t cuTransB =
(TransB == CblasNoTrans) ? HIPBLAS_OP_N : HIPBLAS_OP_T;
CUBLAS_CHECK(hipblasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
hipblasOperation_t cuTransA =
(TransA == CblasNoTrans) ? HIPBLAS_OP_T : HIPBLAS_OP_N;
CUBLAS_CHECK(hipblasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(hipblasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(hipblasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(hipMemcpy(Y, X, N, hipMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(hipblasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(hipblasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(hipblasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(hipblasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(hipblasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(hipblasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(hipblasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(hipMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( set_kernel<Dtype>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_scalar_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( add_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( sub_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( mul_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( div_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( abs_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( exp_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( log_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<float>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( powx_kernel<double>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = ::signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(hiprandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(hiprandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(hiprandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
hiprandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
hiprandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
| 1bba369de58023b988112ddbd5b5c70052195566.cu | #include <math_functions.h> // CUDA's, not caffe's, for fabs, signbit
#include <thrust/device_vector.h>
#include <thrust/functional.h> // thrust::plus
#include <thrust/reduce.h>
#include <cmath>
#include "caffe/common.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <>
void caffe_gpu_gemm<float>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const float alpha, const float* A, const float* B, const float beta,
float* C) {
// Note that cublas follows fortran order.
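// cuBLAS assumes column-major storage, so the row-major product C = A*B is computed as the column-major product C^T = B^T * A^T, which is why B (and N) are passed before A (and M).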
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasSgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemm<double>(const CBLAS_TRANSPOSE TransA,
const CBLAS_TRANSPOSE TransB, const int M, const int N, const int K,
const double alpha, const double* A, const double* B, const double beta,
double* C) {
// Note that cublas follows fortran order.
int lda = (TransA == CblasNoTrans) ? K : M;
int ldb = (TransB == CblasNoTrans) ? N : K;
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
cublasOperation_t cuTransB =
(TransB == CblasNoTrans) ? CUBLAS_OP_N : CUBLAS_OP_T;
CUBLAS_CHECK(cublasDgemm(Caffe::cublas_handle(), cuTransB, cuTransA,
N, M, K, &alpha, B, ldb, A, lda, &beta, C, N));
}
template <>
void caffe_gpu_gemv<float>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const float alpha, const float* A, const float* x,
const float beta, float* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasSgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_gemv<double>(const CBLAS_TRANSPOSE TransA, const int M,
const int N, const double alpha, const double* A, const double* x,
const double beta, double* y) {
cublasOperation_t cuTransA =
(TransA == CblasNoTrans) ? CUBLAS_OP_T : CUBLAS_OP_N;
CUBLAS_CHECK(cublasDgemv(Caffe::cublas_handle(), cuTransA, N, M, &alpha,
A, N, x, 1, &beta, y, 1));
}
template <>
void caffe_gpu_axpy<float>(const int N, const float alpha, const float* X,
float* Y) {
CUBLAS_CHECK(cublasSaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
template <>
void caffe_gpu_axpy<double>(const int N, const double alpha, const double* X,
double* Y) {
CUBLAS_CHECK(cublasDaxpy(Caffe::cublas_handle(), N, &alpha, X, 1, Y, 1));
}
void caffe_gpu_memcpy(const size_t N, const void* X, void* Y) {
if (X != Y) {
CUDA_CHECK(cudaMemcpy(Y, X, N, cudaMemcpyDefault)); // NOLINT(caffe/alt_fn)
}
}
template <>
void caffe_gpu_scal<float>(const int N, const float alpha, float *X) {
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_scal<double>(const int N, const double alpha, double *X) {
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), N, &alpha, X, 1));
}
template <>
void caffe_gpu_axpby<float>(const int N, const float alpha, const float* X,
const float beta, float* Y) {
caffe_gpu_scal<float>(N, beta, Y);
caffe_gpu_axpy<float>(N, alpha, X, Y);
}
template <>
void caffe_gpu_axpby<double>(const int N, const double alpha, const double* X,
const double beta, double* Y) {
caffe_gpu_scal<double>(N, beta, Y);
caffe_gpu_axpy<double>(N, alpha, X, Y);
}
template <>
void caffe_gpu_dot<float>(const int n, const float* x, const float* y,
float* out) {
CUBLAS_CHECK(cublasSdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_dot<double>(const int n, const double* x, const double* y,
double * out) {
CUBLAS_CHECK(cublasDdot(Caffe::cublas_handle(), n, x, 1, y, 1, out));
}
template <>
void caffe_gpu_asum<float>(const int n, const float* x, float* y) {
CUBLAS_CHECK(cublasSasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_asum<double>(const int n, const double* x, double* y) {
CUBLAS_CHECK(cublasDasum(Caffe::cublas_handle(), n, x, 1, y));
}
template <>
void caffe_gpu_scale<float>(const int n, const float alpha, const float *x,
float* y) {
CUBLAS_CHECK(cublasScopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasSscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <>
void caffe_gpu_scale<double>(const int n, const double alpha, const double *x,
double* y) {
CUBLAS_CHECK(cublasDcopy(Caffe::cublas_handle(), n, x, 1, y, 1));
CUBLAS_CHECK(cublasDscal(Caffe::cublas_handle(), n, &alpha, y, 1));
}
template <typename Dtype>
__global__ void set_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = alpha;
}
}
template <typename Dtype>
void caffe_gpu_set(const int N, const Dtype alpha, Dtype* Y) {
if (alpha == 0) {
CUDA_CHECK(cudaMemset(Y, 0, sizeof(Dtype) * N)); // NOLINT(caffe/alt_fn)
return;
}
// NOLINT_NEXT_LINE(whitespace/operators)
set_kernel<Dtype><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template void caffe_gpu_set<int>(const int N, const int alpha, int* Y);
template void caffe_gpu_set<float>(const int N, const float alpha, float* Y);
template void caffe_gpu_set<double>(const int N, const double alpha, double* Y);
template <typename Dtype>
__global__ void add_scalar_kernel(const int n, const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] += alpha;
}
}
template <>
void caffe_gpu_add_scalar(const int N, const float alpha, float* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <>
void caffe_gpu_add_scalar(const int N, const double alpha, double* Y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_scalar_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, alpha, Y);
}
template <typename Dtype>
__global__ void add_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] + b[index];
}
}
template <>
void caffe_gpu_add<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_add<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
add_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void sub_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] - b[index];
}
}
template <>
void caffe_gpu_sub<float>(const int N, const float* a, const float* b,
float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_sub<double>(const int N, const double* a, const double* b,
double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
sub_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void mul_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] * b[index];
}
}
template <>
void caffe_gpu_mul<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_mul<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
mul_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void div_kernel(const int n, const Dtype* a,
const Dtype* b, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = a[index] / b[index];
}
}
template <>
void caffe_gpu_div<float>(const int N, const float* a,
const float* b, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <>
void caffe_gpu_div<double>(const int N, const double* a,
const double* b, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
div_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, b, y);
}
template <typename Dtype>
__global__ void abs_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = abs(a[index]);
}
}
template <>
void caffe_gpu_abs<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_abs<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
abs_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void exp_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = exp(a[index]);
}
}
template <>
void caffe_gpu_exp<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_exp<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
exp_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void log_kernel(const int n, const Dtype* a, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = log(a[index]);
}
}
template <>
void caffe_gpu_log<float>(const int N, const float* a, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <>
void caffe_gpu_log<double>(const int N, const double* a, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
log_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, y);
}
template <typename Dtype>
__global__ void powx_kernel(const int n, const Dtype* a,
const Dtype alpha, Dtype* y) {
CUDA_KERNEL_LOOP(index, n) {
y[index] = pow(a[index], alpha);
}
}
template <>
void caffe_gpu_powx<float>(const int N, const float* a,
const float alpha, float* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<float><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
template <>
void caffe_gpu_powx<double>(const int N, const double* a,
const double alpha, double* y) {
// NOLINT_NEXT_LINE(whitespace/operators)
powx_kernel<double><<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS>>>(
N, a, alpha, y);
}
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sign, y[index] = (Dtype(0) < x[index])
- (x[index] < Dtype(0)));
DEFINE_AND_INSTANTIATE_GPU_UNARY_FUNC(sgnbit, y[index] = ::signbit(x[index]));
void caffe_gpu_rng_uniform(const int n, unsigned int* r) {
CURAND_CHECK(curandGenerate(Caffe::curand_generator(), r, n));
}
template <>
void caffe_gpu_rng_uniform<float>(const int n, const float a, const float b,
float* r) {
CURAND_CHECK(curandGenerateUniform(Caffe::curand_generator(), r, n));
const float range = b - a;
if (range != static_cast<float>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<float>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_uniform<double>(const int n, const double a, const double b,
double* r) {
CURAND_CHECK(curandGenerateUniformDouble(Caffe::curand_generator(), r, n));
const double range = b - a;
if (range != static_cast<double>(1)) {
caffe_gpu_scal(n, range, r);
}
if (a != static_cast<double>(0)) {
caffe_gpu_add_scalar(n, a, r);
}
}
template <>
void caffe_gpu_rng_gaussian(const int n, const float mu, const float sigma,
float* r) {
CURAND_CHECK(
curandGenerateNormal(Caffe::curand_generator(), r, n, mu, sigma));
}
template <>
void caffe_gpu_rng_gaussian(const int n, const double mu, const double sigma,
double* r) {
CURAND_CHECK(
curandGenerateNormalDouble(Caffe::curand_generator(), r, n, mu, sigma));
}
} // namespace caffe
|
8b4cf9cb9c85c51a9525d61087859b7e88ad67be.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
*
*******************************************************************************/
#include "animate.h"
#include <stdio.h>
/*************************************************************************/
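// drawColor: clamp each input channel to [0,1], scale to 0-255, and write RGBA bytes (alpha fixed at 255).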
__global__ void drawColor(unsigned char* optr,
const float* red,
const float* green,
const float* blue) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float theRed = red[offset];
// theRed = (theRed / 50.0) + 0.5;
if (theRed < 0) theRed = 0;
if (theRed > 1) theRed = 1;
float theGreen = green[offset];
// theGreen = (theGreen / 50.0) + 0.5;
if (theGreen < 0) theGreen = 0;
if (theGreen > 1) theGreen = 1;
float theBlue = blue[offset];
// theBlue = (theBlue / 50.0) + 0.5;
if (theBlue < 0) theBlue = 0;
if (theBlue > 1) theBlue = 1;
optr[offset * 4 + 0] = 255 * theRed; // red
optr[offset * 4 + 1] = 255 * theGreen; // green
optr[offset * 4 + 2] = 255 * theBlue; // blue
optr[offset * 4 + 3] = 255; // alpha (opacity)
}
/*************************************************************************/
void CPUAnimBitmap::drawPalette(void) {
dim3 threads(32, 32); // assume 32x32 = 1024 threads per block
dim3 blocks(ceil(width/32), ceil(height/32));
// dim3 threads(BLOCK_WIDTH, BLOCK_HEIGHT);
// dim3 blocks(GRID_WIDTH, GRID_HEIGHT);
// drawGray <<< blocks, threads >>> (dev_bitmap, thePalette->gray);
hipLaunchKernelGGL(( drawColor) , dim3(blocks), dim3(threads) , 0, 0, dev_bitmap,
thePalette->red,
thePalette->green,
thePalette->blue);
// copy bitmap from device to host to draw frame:
hipMemcpy(get_ptr(), dev_bitmap, image_size(), hipMemcpyDeviceToHost);
glutMainLoopEvent();
glutPostRedisplay();
}
/******************************************************************************/
CPUAnimBitmap::CPUAnimBitmap(GPU_Palette* P1) {//void* d) {
width = P1->palette_width;
height = P1->palette_height;
pixels = new unsigned char[width * height * 4];
thePalette = P1;
}
/******************************************************************************/
CPUAnimBitmap::~CPUAnimBitmap() {
delete[] pixels;
}
/******************************************************************************/
void CPUAnimBitmap::click_drag(void (* f)(void*, int, int, int, int)) {
clickDrag = f;
}
/******************************************************************************/
// static method used for glut callbacks
CPUAnimBitmap** CPUAnimBitmap::get_bitmap_ptr(void) {
static CPUAnimBitmap* gBitmap;
return &gBitmap;
}
/******************************************************************************/
// static method used for glut callbacks
void CPUAnimBitmap::mouse_func(int button, int state, int mx, int my) {
if (button == GLUT_LEFT_BUTTON) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
if (state == GLUT_DOWN) {
bitmap->dragStartX = mx;
bitmap->dragStartY = my;
} else if (state == GLUT_UP) {
bitmap->clickDrag(bitmap->thePalette,
bitmap->dragStartX,
bitmap->dragStartY,
mx, my);
}
}
}
/******************************************************************************/
// static method used for glut callbacks
void CPUAnimBitmap::Draw(void) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE,
bitmap->pixels);
glutSwapBuffers();
}
/******************************************************************************/
void CPUAnimBitmap::initAnimation() {
CPUAnimBitmap** bitmap = get_bitmap_ptr();
*bitmap = this;
int c = 1;
char* dummy = "";
glutInit(&c, &dummy);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("DNF");
// glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
if (clickDrag != NULL) glutMouseFunc(mouse_func);
}
//CUDA functions for color conversion
/******************************************************************************/
__device__ unsigned char value(float n1, float n2, int hue) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char) (255 * (n1 + (n2 - n1) * hue / 60));
if (hue < 180)
return (unsigned char) (255 * n2);
if (hue < 240)
return (unsigned char) (255 * (n1 + (n2 - n1) * (240 - hue) / 60));
return (unsigned char) (255 * n1);
}
/******************************************************************************/
| 8b4cf9cb9c85c51a9525d61087859b7e88ad67be.cu | /*******************************************************************************
*
*******************************************************************************/
#include "animate.h"
#include <stdio.h>
/*************************************************************************/
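// drawColor: clamp each input channel to [0,1], scale to 0-255, and write RGBA bytes (alpha fixed at 255).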
__global__ void drawColor(unsigned char* optr,
const float* red,
const float* green,
const float* blue) {
// map from threadIdx/BlockIdx to pixel position
int x = threadIdx.x + blockIdx.x * blockDim.x;
int y = threadIdx.y + blockIdx.y * blockDim.y;
int offset = x + y * blockDim.x * gridDim.x;
float theRed = red[offset];
// theRed = (theRed / 50.0) + 0.5;
if (theRed < 0) theRed = 0;
if (theRed > 1) theRed = 1;
float theGreen = green[offset];
// theGreen = (theGreen / 50.0) + 0.5;
if (theGreen < 0) theGreen = 0;
if (theGreen > 1) theGreen = 1;
float theBlue = blue[offset];
// theBlue = (theBlue / 50.0) + 0.5;
if (theBlue < 0) theBlue = 0;
if (theBlue > 1) theBlue = 1;
optr[offset * 4 + 0] = 255 * theRed; // red
optr[offset * 4 + 1] = 255 * theGreen; // green
optr[offset * 4 + 2] = 255 * theBlue; // blue
optr[offset * 4 + 3] = 255; // alpha (opacity)
}
/*************************************************************************/
void CPUAnimBitmap::drawPalette(void) {
dim3 threads(32, 32); // assume 32x32 = 1024 threads per block
dim3 blocks(ceil(width/32), ceil(height/32));
// dim3 threads(BLOCK_WIDTH, BLOCK_HEIGHT);
// dim3 blocks(GRID_WIDTH, GRID_HEIGHT);
// drawGray <<< blocks, threads >>> (dev_bitmap, thePalette->gray);
drawColor <<< blocks, threads >>> (dev_bitmap,
thePalette->red,
thePalette->green,
thePalette->blue);
// copy bitmap from device to host to draw frame:
cudaMemcpy(get_ptr(), dev_bitmap, image_size(), cudaMemcpyDeviceToHost);
glutMainLoopEvent();
glutPostRedisplay();
}
/******************************************************************************/
CPUAnimBitmap::CPUAnimBitmap(GPU_Palette* P1) {//void* d) {
width = P1->palette_width;
height = P1->palette_height;
pixels = new unsigned char[width * height * 4];
thePalette = P1;
}
/******************************************************************************/
CPUAnimBitmap::~CPUAnimBitmap() {
delete[] pixels;
}
/******************************************************************************/
void CPUAnimBitmap::click_drag(void (* f)(void*, int, int, int, int)) {
clickDrag = f;
}
/******************************************************************************/
// static method used for glut callbacks
CPUAnimBitmap** CPUAnimBitmap::get_bitmap_ptr(void) {
static CPUAnimBitmap* gBitmap;
return &gBitmap;
}
/******************************************************************************/
// static method used for glut callbacks
void CPUAnimBitmap::mouse_func(int button, int state, int mx, int my) {
if (button == GLUT_LEFT_BUTTON) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
if (state == GLUT_DOWN) {
bitmap->dragStartX = mx;
bitmap->dragStartY = my;
} else if (state == GLUT_UP) {
bitmap->clickDrag(bitmap->thePalette,
bitmap->dragStartX,
bitmap->dragStartY,
mx, my);
}
}
}
/******************************************************************************/
// static method used for glut callbacks
void CPUAnimBitmap::Draw(void) {
CPUAnimBitmap* bitmap = *(get_bitmap_ptr());
glClearColor(0.0, 0.0, 0.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT);
glDrawPixels(bitmap->width, bitmap->height, GL_RGBA, GL_UNSIGNED_BYTE,
bitmap->pixels);
glutSwapBuffers();
}
/******************************************************************************/
void CPUAnimBitmap::initAnimation() {
CPUAnimBitmap** bitmap = get_bitmap_ptr();
*bitmap = this;
int c = 1;
char* dummy = "";
glutInit(&c, &dummy);
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA);
glutInitWindowSize(width, height);
glutCreateWindow("DNF");
// glutKeyboardFunc(Key);
glutDisplayFunc(Draw);
if (clickDrag != NULL) glutMouseFunc(mouse_func);
}
//CUDA functions for color conversion
/******************************************************************************/
__device__ unsigned char value(float n1, float n2, int hue) {
if (hue > 360) hue -= 360;
else if (hue < 0) hue += 360;
if (hue < 60)
return (unsigned char) (255 * (n1 + (n2 - n1) * hue / 60));
if (hue < 180)
return (unsigned char) (255 * n2);
if (hue < 240)
return (unsigned char) (255 * (n1 + (n2 - n1) * (240 - hue) / 60));
return (unsigned char) (255 * n1);
}
/******************************************************************************/
|
f009a26fa62840d1ccdf6e4208e226c0d183fb34.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "stereotgv.h"
__global__ void TgvSolveEtaKernel(float alpha0, float alpha1,
float* atensor, float *btensor, float* ctensor,
float* etau, float* etav1, float* etav2,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
float a = atensor[pos];
float b = btensor[pos];
float c = ctensor[pos];
etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1);
etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0;
etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0;
}
}
void StereoTgv::SolveEta(float alpha0, float alpha1,
float* a, float *b, float* c,
int w, int h, int s,
float* etau, float* etav1, float* etav2)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvSolveEtaKernel << < blocks, threads >> > (alpha0, alpha1,
a, b, c,
etau, etav1, etav2,
w, h, s);
}
// *****************************
// Masked
// *****************************
__global__ void TgvSolveEtaMaskedKernel(float* mask, float alpha0, float alpha1,
float* atensor, float *btensor, float* ctensor,
float* etau, float* etav1, float* etav2,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy >= height) || (ix >= width)) return;
int pos = ix + iy * stride;
if (mask[pos] == 0.0f) return;
float a = atensor[pos];
float b = btensor[pos];
float c = ctensor[pos];
etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1);
etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0;
etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0;
}
void StereoTgv::SolveEtaMasked(float* mask, float alpha0, float alpha1,
float* a, float *b, float* c,
int w, int h, int s,
float* etau, float* etav1, float* etav2)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvSolveEtaMaskedKernel << < blocks, threads >> > (mask, alpha0, alpha1,
a, b, c,
etau, etav1, etav2,
w, h, s);
} | f009a26fa62840d1ccdf6e4208e226c0d183fb34.cu | #include "stereotgv.h"
__global__ void TgvSolveEtaKernel(float alpha0, float alpha1,
float* atensor, float *btensor, float* ctensor,
float* etau, float* etav1, float* etav2,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy < height) && (ix < width))
{
int pos = ix + iy * stride;
float a = atensor[pos];
float b = btensor[pos];
float c = ctensor[pos];
etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1);
etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0;
etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0;
}
}
void StereoTgv::SolveEta(float alpha0, float alpha1,
float* a, float *b, float* c,
int w, int h, int s,
float* etau, float* etav1, float* etav2)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvSolveEtaKernel << < blocks, threads >> > (alpha0, alpha1,
a, b, c,
etau, etav1, etav2,
w, h, s);
}
// *****************************
// Masked
// *****************************
__global__ void TgvSolveEtaMaskedKernel(float* mask, float alpha0, float alpha1,
float* atensor, float *btensor, float* ctensor,
float* etau, float* etav1, float* etav2,
int width, int height, int stride)
{
int iy = blockIdx.y * blockDim.y + threadIdx.y; // current row
int ix = blockIdx.x * blockDim.x + threadIdx.x; // current column
if ((iy >= height) || (ix >= width)) return;
int pos = ix + iy * stride;
if (mask[pos] == 0.0f) return;
float a = atensor[pos];
float b = btensor[pos];
float c = ctensor[pos];
etau[pos] = (a*a + b * b + 2 * c*c + (a + c)*(a + c) + (b + c)*(b + c)) * (alpha1 * alpha1);
etav1[pos] = (alpha1 * alpha1)*(b * b + c * c) + 4 * alpha0 * alpha0;
etav2[pos] = (alpha1 * alpha1)*(a * a + c * c) + 4 * alpha0 * alpha0;
}
void StereoTgv::SolveEtaMasked(float* mask, float alpha0, float alpha1,
float* a, float *b, float* c,
int w, int h, int s,
float* etau, float* etav1, float* etav2)
{
dim3 threads(BlockWidth, BlockHeight);
dim3 blocks(iDivUp(w, threads.x), iDivUp(h, threads.y));
TgvSolveEtaMaskedKernel << < blocks, threads >> > (mask, alpha0, alpha1,
a, b, c,
etau, etav1, etav2,
w, h, s);
} |
3963eadc5886656cf96bce74df9e628a715cab70.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "poly_div8.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
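// Auto-generated micro-benchmark sweep: for each matrix size and block configuration
// the grid is rounded up to a multiple of the block, the kernel gets one initial
// launch plus a 10-iteration warm-up, and 1000 subsequent launches are timed with
// steady_clock (note: no device synchronization inside the timed region).
// Output format: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]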
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *poli = NULL;
hipMalloc(&poli, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
poly_div8), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
poly_div8), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
poly_div8), dim3(gridBlock),dim3(threadBlock), 0, 0, poli,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 3963eadc5886656cf96bce74df9e628a715cab70.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "poly_div8.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
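// Auto-generated micro-benchmark sweep: for each matrix size and block configuration
// the grid is rounded up to a multiple of the block, the kernel gets one initial
// launch plus a 10-iteration warm-up, and 1000 subsequent launches are timed with
// steady_clock (note: no device synchronization inside the timed region).
// Output format: [elapsed_us,(BLOCKX,BLOCKY),(XSIZE,YSIZE)]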
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *poli = NULL;
cudaMalloc(&poli, XSIZE*YSIZE);
const int N = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
poly_div8<<<gridBlock,threadBlock>>>(poli,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
poly_div8<<<gridBlock,threadBlock>>>(poli,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
poly_div8<<<gridBlock,threadBlock>>>(poli,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
3c94ba91828838f05de5c03556d3eac87f357104.hip | // !!! This is a file automatically generated by hipify!!!
/*
* GPU kernel functions for semi-global matching algorithm
*/
#ifndef _SEMIGLOBALMATCHING_KERNEL_CU_
#define _SEMIGLOBALMATCHING_KERNEL_CU_
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#define NUM_PARALLEL_THREADS 17
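// Number of image rows/columns a direction-block processes concurrently in
// path_traversal: threadIdx.y strides through the scan lines with this step,
// giving a per-direction block of (dmax x NUM_PARALLEL_THREADS) threads.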
typedef unsigned char uchar;
typedef unsigned int uint;
struct DeviceImage
{
int rows;
int cols;
uchar3 *bgr; // pointer to BGR image on device
float *gray; // pointer to grayscale image on device
hipArray *array; // pointer to hipArray we use for texture
};
struct CostMatrices {
int width; // width of matrices (x)
int height; // height of matrices (y)
int depth; // depth of matrices (z)
float * C; // initial costs matrix
float * E[8]; // energy matrix for each direction
float * S; // weighted and summed energy matrices
};
// global texture references for bound image data
texture<float, hipTextureType2D, hipReadModeElementType> bTex;
texture<float, hipTextureType2D, hipReadModeElementType> mTex;
// check if index is in image bounds
static __device__ __forceinline__ bool in_img(int x, int y, int rows, int cols)
{
return x >= 0 && x < cols && y >= 0 && y < rows;
}
// convert BGR image to grayscale
template<int pxPerThread>
__global__ void bgr_to_grayscale(DeviceImage img)
{
// get global index within image
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = pxPerThread * (blockIdx.y*blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < pxPerThread; ++i)
{
// get BGR pixel values
uchar3 p;
if (in_img(x, y + i, img.rows, img.cols))
p = img.bgr[(y + i) * img.cols + x];
else
return;
// calculate grayscale value
float g = 0.298839f*(float)p.z + 0.586811f*(float)p.y + 0.114350f*(float)p.x;
// set grayscale value in image
if (in_img(x, y + i, img.rows, img.cols))
img.gray[(y + i) * img.cols + x] = (g >= 255.f ? 255.f : g);
}
}
// Call grayscale conversion kernel
template<int pxPerThread>
int getGrayscaleImage(DeviceImage * img)
{
// allocate memory
img->gray = 0;
hipMalloc((void**)&img->gray, img->rows * img->cols * sizeof(float));
if (img->gray == 0)
{
std::cerr << "Failed to allocate memory" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (img->cols + block_size.x - 1) / block_size.x;
grid_size.y = (img->rows + pxPerThread * block_size.y - 1) / (pxPerThread * block_size.y);
// call kernel
hipLaunchKernelGGL(( bgr_to_grayscale<pxPerThread>) , dim3(grid_size), dim3(block_size) , 0, 0, *img);
// can now free the BGR device memory
hipFree(img->bgr);
return 0;
}
// Calculate initial costs of the images and store into cost matrix C
__global__ void initial_costs(CostMatrices cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
// compute normalized coords
float u = x / (float)w;
float v = y / (float)h;
float offset = d / (float)w;
// fetch values from texture
float baseVal = tex2D(bTex, u, v);
float matchVal = tex2D(mTex, u - offset, v);
// compute C(x,y,d) and write to matrix
if (x < w && y < h && d < dmax)
cm.C[(x + (y * w)) * dmax + d] = fabs(baseVal - matchVal);
}
// Bind images on device to textures
void intializeTextures(DeviceImage bImg, DeviceImage mImg)
{
// allocate 2D cuda arrays
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);
hipMallocArray(&bImg.array, &channelDesc, bImg.cols, bImg.rows);
hipMallocArray(&mImg.array, &channelDesc, mImg.cols, mImg.rows);
// copy images into 2D cuda arrays
hipMemcpyToArray(bImg.array, 0, 0, bImg.gray, bImg.rows * bImg.cols * sizeof(float), hipMemcpyDeviceToDevice);
hipMemcpyToArray(mImg.array, 0, 0, mImg.gray, mImg.rows * mImg.cols * sizeof(float), hipMemcpyDeviceToDevice);
// set texture parameters
bTex.normalized = true; // access with normalized texture coordinates
bTex.filterMode = hipFilterModeLinear; // linear interpolation
bTex.addressMode[0] = hipAddressModeBorder; // OOB texture calls return clamped edge value
bTex.addressMode[1] = hipAddressModeBorder;
mTex.normalized = true;
mTex.filterMode = hipFilterModeLinear;
mTex.addressMode[0] = hipAddressModeBorder;
mTex.addressMode[1] = hipAddressModeBorder;
// bind arrays to texture
hipBindTextureToArray(bTex, bImg.array, channelDesc);
hipBindTextureToArray(mTex, mImg.array, channelDesc);
// can now free original grayscale images from linear memory
hipFree(bImg.gray);
hipFree(mImg.gray);
}
// Call initial cost calculation kernel
int getInitialCosts(CostMatrices& cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory on device for the cost matrix
cm.C = 0;
hipMalloc((void**)&cm.C, h * w * dmax * sizeof(float));
if (cm.C == 0)
{
std::cerr << "Failed to allocate memory for initial cost matrix!" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(4, 4, 32);
dim3 grid_size(0, 0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / (block_size.y);
grid_size.z = (dmax + block_size.z - 1) / (block_size.z);
// call kernel on GPU
hipLaunchKernelGGL(( initial_costs), dim3(grid_size), dim3(block_size), 0, 0, cm);
return 0;
}
template<int numTestDirections>
int getPenaltyValues(float ** P1, float ** P2)
{
// use values given in paper
float h_P1[numTestDirections] = { 22.02, 22.02, 17.75, 17.75, 14.93, 14.93, 10.67, 10.67 };
float h_P2[numTestDirections] = { 82.79, 82.79, 80.87, 80.87, 23.30, 23.30, 28.80, 28.80 };
// copy values to device
*P1 = 0;
*P2 = 0;
hipMalloc((void**)P1, numTestDirections * sizeof(float));
hipMalloc((void**)P2, numTestDirections * sizeof(float));
if (*P1 == 0 || *P2 == 0)
{
std::cerr << "Failed to allocate memory for penalty values" << std::endl;
return 1;
}
hipMemcpy(*P1, h_P1, numTestDirections * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(*P2, h_P2, numTestDirections * sizeof(float), hipMemcpyHostToDevice);
return 0;
}
// Directional cost aggregation: each thread block traverses the image along one of
// the eight scan directions (dir = blockIdx.x) and accumulates path costs into cm.E[dir].
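// At each step along a path r the kernel applies the standard SGM recurrence
//   E_dir(p,d) = C(p,d) + min( E_dir(p-r,d),
//                              E_dir(p-r,d-1) + P1, E_dir(p-r,d+1) + P1,
//                              min_i E_dir(p-r,i) + P2 )
// Note: the usual normalization (subtracting min_k E_dir(p-r,k)) is omitted here,
// so accumulated costs grow along each path.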
__global__ void path_traversal(CostMatrices cm, float * P1, float * P2)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int d = threadIdx.x; // thread id corresponds to d value
const int il = threadIdx.y;
const int dir = blockIdx.x;
int dx = 0;
int dy = 0;
int x0 = 0;
int y0 = 0;
int x_ = 0;
int y_ = 0;
int maxItr = 0;
// each block works on a different direction
switch (dir) {
// HORIZONTAL FORWARD DIRECTION
case 0:
// (forward direction)
dx = 1;
x0 = 0;
// do edge case
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
// E(x,y,d)
for (int x = x0 + dx; x < w; x += dx) {
// wait for threads to sync up
__syncthreads();
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + (y * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + (y * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + (y * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// HORIZONTAL REVERSE DIRECTION
case 1:
// (reverse direction)
dx = -1;
x0 = w - 1;
// do edge case
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
// E(x,y,d)
for (int x = x0 + dx; x >= 0; x += dx) {
// wait for threads to sync up
__syncthreads();
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + (y * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + (y * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + (y * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// VERTICAL BOTTOM->TOP
case 2:
// (forward direction)
dy = 1;
y0 = 0;
// do edge case
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// E(x,y,d)
for (int y = y0 + dy; y < h; y += dy) {
// wait for threads to sync up
__syncthreads();
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// VERTICAL TOP->BOTTOM
case 3:
// (reverse direction)
dy = -1;
y0 = h - 1;
// do edge case
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// E(x,y,d)
for (int y = y0 + dy; y >= 0; y += dy) {
// wait for threads to sync up
__syncthreads();
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL TOPLEFT->BOTTOMRIGHT
case 4:
// top left -> bottom right
dx = 1;
dy = -1;
x0 = 0;
y0 = h - 1;
// do top row edge case
for (int x = x0+il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do first col edge case
for (int y = y0-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_+il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL BOTTOMRIGHT->TOPLEFT
case 5:
// bottom right -> top left
dx = -1;
dy = 1;
x0 = w - 1;
y0 = 0;
// do bottom row edge case
for (int x = x0-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do last col edge case
for (int y = y0+il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_+il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL BOTTOMLEFT->TOPRIGHT
case 6:
// bottom left -> top right
dx = 1;
dy = 1;
x0 = 0;
y0 = 0;
// do row edge case
for (int x = x0+il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do col edge case
for (int y = y0+il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_+il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_+il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL TOPRIGHT->BOTTOMLEFT
case 7:
// top right -> bottom left
dx = -1;
dy = -1;
x0 = w - 1;
y0 = h - 1;
// do row edge case
for (int x = x0-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do col edge case
for (int y = y0-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
}
}
template<int numTestDirections>
int doPathTraversal(CostMatrices& cm, float * P1, float * P2)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory
for (int i = 0; i < numTestDirections; i++) {
cm.E[i] = 0;
hipMalloc((void**)&cm.E[i], h * w * dmax * sizeof(float));
if (cm.E[i] == 0) {
std::cerr << "ERROR: E[" << i << "] failed to allocate memory" << std::endl;
return 1;
}
}
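// Launch shape: one block per scan direction; threadIdx.x enumerates the dmax
// disparity hypotheses and threadIdx.y the NUM_PARALLEL_THREADS concurrent scan
// lines, so dmax * NUM_PARALLEL_THREADS must stay within the device's max threads per block.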
dim3 block_size(dmax,NUM_PARALLEL_THREADS);
dim3 grid_size(numTestDirections);
hipLaunchKernelGGL(( path_traversal) , dim3(grid_size), dim3(block_size) , 0, 0, cm, P1, P2);
// can now free initial costs matrix
hipFree(cm.C);
return 0;
}
// get weightings for each direction
template<int numTestDirections>
int getDirectionWeightings(float ** d_weights)
{
// for now we'll just use 1/numdirections
// set values on host
//float h_weights[numTestDirections] = { 0.01, 0.01, 0.01, 0.01, 0.9f, 0.9f, 0.9f, 0.9f };
//float h_weights[numTestDirections] = { 0.96f, 0.96f, 0.98f, 0.98f, 0.06f, 0.06f, 0.27f, 0.27f };
float h_weights[numTestDirections];
std::fill_n(h_weights, numTestDirections, 1.f / (float)numTestDirections);
// copy values to device
*d_weights = 0;
hipMalloc((void**)d_weights, numTestDirections * sizeof(float));
if (*d_weights == 0)
{
std::cerr << "Failed to allocate memory for direction weights" << std::endl;
return 1;
}
hipMemcpy(*d_weights, h_weights, numTestDirections * sizeof(float), hipMemcpyHostToDevice);
return 0;
}
__global__ void sum_energy_matrices(const CostMatrices cm, float * weights, int numMatrices = 8)
{
// each thread performs sum for all d values at a given x,y
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int w = cm.width;
int h = cm.height;
int dmax = cm.depth;
float elementSum;
if (x < w && y < h){
for (int d = 0; d < dmax; d++) {
elementSum = 0;
for (int i = 0; i < numMatrices; i++) {
elementSum += weights[i] * cm.E[i][(x + (y * w)) * dmax + d];
}
cm.S[(x + (y * w)) * dmax + d] = elementSum;
}
}
}
template<int numTestDirections>
int getFinalCosts(CostMatrices& cm, float * d_weights)
{
int h = cm.height;
int w = cm.width;
int dmax = cm.depth;
// allocate memory
cm.S = 0;
hipMalloc((void**)&cm.S, h * w * dmax * sizeof(float));
if (cm.S == 0) {
std::cerr << "ERROR: S[] failed to allocate memory" << std::endl;
return 1;
}
dim3 block_size(32,32);
dim3 grid_size(0,0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
sum_energy_matrices << <grid_size, block_size >> >(cm, d_weights, numTestDirections);
// can now free energy matrices
for (int i = 0; i < numTestDirections; i++) {
hipFree(cm.E[i]);
}
return 0;
}
__global__ void find_minima(const CostMatrices cm, float * D)
{
// each thread finds the minimum over all d values at a given x,y
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int w = cm.width;
int h = cm.height;
int dmax = cm.depth;
int mind;
float mindval;
if (x < w && y < h) {
mind = 0;
mindval = cm.S[(x + (y * w)) * dmax];
for (int d = 1; d < dmax; d++) {
float test_dval = cm.S[(x + (y * w)) * dmax + d];
if (test_dval < mindval) {
mindval = test_dval;
mind = d;
}
}
D[x + (y * w)] = mind;
}
}
int getDisparities(const CostMatrices cm, float ** D)
{
int h = cm.height;
int w = cm.width;
// allocate memory
*D = 0;
hipMalloc((void**)D, h * w * sizeof(float));
if (*D == 0) {
std::cerr << "ERROR: D[] failed to allocate memory" << std::endl;
return 1;
}
dim3 block_size(32, 32);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
find_minima << <grid_size, block_size >> >(cm, *D);
// can now free final cost matrix
hipFree(cm.S);
return 0;
}
// compute initial costs where base and match (left and right) images are reversed
__global__ void initial_costs_reverse(CostMatrices cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
// compute normalized coords
float u = x / (float)w;
float v = y / (float)h;
float offset = d / (float)w;
// fetch values from texture
float baseVal = tex2D(mTex, u, v);
float matchVal = tex2D(bTex, u + offset, v);
// compute C(x,y,d) and write to matrix
if (x < w && y < h && d < dmax)
cm.C[(x + (y * w)) * dmax + d] = fabs(baseVal - matchVal);
}
int getInitialCosts_reverse(CostMatrices& cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory on device for the cost matrix
cm.C = 0;
hipMalloc((void**)&cm.C, h * w * dmax * sizeof(float));
if (cm.C == 0)
{
std::cerr << "Failed to allocate memory for initial cost matrix!" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(4, 4, 32);
dim3 grid_size(0, 0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / (block_size.y);
grid_size.z = (dmax + block_size.z - 1) / (block_size.z);
// call kernel on GPU
initial_costs_reverse << <grid_size, block_size >> >(cm);
return 0;
}
// detect occlusion areas and set to zero
template<int pxPerThread>
__global__ void refine_dmap(float * D_base, const float * D_ref, int h, int w)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = pxPerThread * (blockIdx.y*blockDim.y + threadIdx.y);
const int tolerance = 3; // set some tolerance (px)
for (int i = 0; i < pxPerThread; i++) {
if (in_img(x, y + i, h, w)) {
int baseVal = D_base[x + ((y + i) * w)];
int matchVal;
if (x - baseVal >= 0) matchVal = D_ref[x - baseVal + ((y + i) * w)]; // guard matches the x - baseVal read
else continue;
if (abs(baseVal - matchVal) > tolerance)
D_base[x + ((y + i) * w)] = 0;
}
}
}
// refines base disparity map (writes to base map)
template<int pxPerThread>
void refineDisparityMap(float * D_base, float * D_ref, int h, int w)
{
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + pxPerThread * block_size.y - 1) / (pxPerThread * block_size.y);
hipLaunchKernelGGL(( refine_dmap<pxPerThread>), dim3(grid_size), dim3(block_size), 0, 0, D_base, D_ref, h, w);
// no longer need the match image disparity map
hipFree(D_ref);
}
// applies median filter with 3x3 kernel to image
__global__ void median_filter_3x3(float * d_input_img, float * d_output_img, int h, int w)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float window[9];
if (!in_img(x,y,h,w))
return;
// get elements for kernel
window[0] = (y == 0 || x == 0) ? 0 : d_input_img[(y - 1)*w + (x - 1)];
window[1] = (y == 0) ? 0 : d_input_img[(y - 1)*w + x];
window[2] = (y == 0 || x == w - 1) ? 0 : d_input_img[(y - 1)*w + (x + 1)];
window[3] = (x == 0) ? 0 : d_input_img[y*w + (x - 1)];
window[4] = d_input_img[y*w + x];
window[5] = (x == w - 1) ? 0 : d_input_img[y*w + (x + 1)];
window[6] = (y == h - 1 || x == 0) ? 0 : d_input_img[(y + 1)*w + (x - 1)];
window[7] = (y == h - 1) ? 0 : d_input_img[(y + 1)*w + x];
window[8] = (y == h - 1 || x == w - 1) ? 0 : d_input_img[(y + 1)*w + (x + 1)];
// order elements
for (uint j = 0; j<5; ++j)
{
// find position of minimum element
float temp = window[j];
uint idx = j;
for (uint l = j + 1; l<9; ++l)
if (window[l] < temp){ idx = l; temp = window[l]; }
// put found minimum element in its place
window[idx] = window[j];
window[j] = temp;
}
// write median value
d_output_img[y*w + x] = window[4];
}
int doMedianFiltering(float ** image, int h, int w)
{
// allocate memory
float * result = 0;
hipMalloc((void**)&result, h * w * sizeof(float));
if (result == 0) {
std::cerr << "ERROR: failed to allocate memory for median filtering" << std::endl;
return 1;
}
dim3 block_size(16, 32);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
hipLaunchKernelGGL(( median_filter_3x3) , dim3(grid_size), dim3(block_size) , 0, 0, *image, result, h, w);
// free original image and return filtered result
hipFree(*image);
*image = result;
return 0;
}
#endif // #ifndef _SEMIGLOBALMATCHING_KERNEL_CU_
| 3c94ba91828838f05de5c03556d3eac87f357104.cu | /*
* GPU kernel functions for semi-global matching algorithm
*/
#ifndef _SEMIGLOBALMATCHING_KERNEL_CU_
#define _SEMIGLOBALMATCHING_KERNEL_CU_
#include "cuda.h"
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#define NUM_PARALLEL_THREADS 17
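// Number of image rows/columns a direction-block processes concurrently in
// path_traversal: threadIdx.y strides through the scan lines with this step,
// giving a per-direction block of (dmax x NUM_PARALLEL_THREADS) threads.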
typedef unsigned char uchar;
typedef unsigned int uint;
struct DeviceImage
{
int rows;
int cols;
uchar3 *bgr; // pointer to BGR image on device
float *gray; // pointer to grayscale image on device
cudaArray *array; // pointer to cudaArray we use for texture
};
struct CostMatrices {
int width; // width of matrices (x)
int height; // height of matrices (y)
int depth; // depth of matrices (z)
float * C; // initial costs matrix
float * E[8]; // energy matrix for each direction
float * S; // weighted and summed energy matrices
};
// global texture references for bound image data
texture<float, cudaTextureType2D, cudaReadModeElementType> bTex;
texture<float, cudaTextureType2D, cudaReadModeElementType> mTex;
// check if index is in image bounds
static __device__ __forceinline__ bool in_img(int x, int y, int rows, int cols)
{
return x >= 0 && x < cols && y >= 0 && y < rows;
}
// convert BGR image to grayscale
template<int pxPerThread>
__global__ void bgr_to_grayscale(DeviceImage img)
{
// get global index within image
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = pxPerThread * (blockIdx.y*blockDim.y + threadIdx.y);
// loop over the number of pixels each thread is handling
for (size_t i = 0; i < pxPerThread; ++i)
{
// get BGR pixel values
uchar3 p;
if (in_img(x, y + i, img.rows, img.cols))
p = img.bgr[(y + i) * img.cols + x];
else
return;
// calculate grayscale value
float g = 0.298839f*(float)p.z + 0.586811f*(float)p.y + 0.114350f*(float)p.x;
// set grayscale value in image
if (in_img(x, y + i, img.rows, img.cols))
img.gray[(y + i) * img.cols + x] = (g >= 255.f ? 255.f : g);
}
}
// Call grayscale conversion kernel
template<int pxPerThread>
int getGrayscaleImage(DeviceImage * img)
{
// allocate memory
img->gray = 0;
cudaMalloc((void**)&img->gray, img->rows * img->cols * sizeof(float));
if (img->gray == 0)
{
std::cerr << "Failed to allocate memory" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (img->cols + block_size.x - 1) / block_size.x;
grid_size.y = (img->rows + pxPerThread * block_size.y - 1) / (pxPerThread * block_size.y);
// call kernel
bgr_to_grayscale<pxPerThread> <<<grid_size, block_size >>>(*img);
// can now free the BGR device memory
cudaFree(img->bgr);
return 0;
}
// Calculate initial costs of the images and store into cost matrix C
__global__ void initial_costs(CostMatrices cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
// compute normalized coords
float u = x / (float)w;
float v = y / (float)h;
float offset = d / (float)w;
// fetch values from texture
float baseVal = tex2D(bTex, u, v);
float matchVal = tex2D(mTex, u - offset, v);
// compute C(x,y,d) and write to matrix
if (x < w && y < h && d < dmax)
cm.C[(x + (y * w)) * dmax + d] = fabs(baseVal - matchVal);
}
// Bind images on device to textures
void intializeTextures(DeviceImage bImg, DeviceImage mImg)
{
// allocate 2D cuda arrays
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);
cudaMallocArray(&bImg.array, &channelDesc, bImg.cols, bImg.rows);
cudaMallocArray(&mImg.array, &channelDesc, mImg.cols, mImg.rows);
// copy images into 2D cuda arrays
cudaMemcpyToArray(bImg.array, 0, 0, bImg.gray, bImg.rows * bImg.cols * sizeof(float), cudaMemcpyDeviceToDevice);
cudaMemcpyToArray(mImg.array, 0, 0, mImg.gray, mImg.rows * mImg.cols * sizeof(float), cudaMemcpyDeviceToDevice);
// set texture parameters
bTex.normalized = true; // access with normalized texture coordinates
bTex.filterMode = cudaFilterModeLinear; // linear interpolation
bTex.addressMode[0] = cudaAddressModeBorder; // OOB texture calls return clamped edge value
bTex.addressMode[1] = cudaAddressModeBorder;
mTex.normalized = true;
mTex.filterMode = cudaFilterModeLinear;
mTex.addressMode[0] = cudaAddressModeBorder;
mTex.addressMode[1] = cudaAddressModeBorder;
// bind arrays to texture
cudaBindTextureToArray(bTex, bImg.array, channelDesc);
cudaBindTextureToArray(mTex, mImg.array, channelDesc);
// can now free original grayscale images from linear memory
cudaFree(bImg.gray);
cudaFree(mImg.gray);
}
// Call initial cost calculation kernel
int getInitialCosts(CostMatrices& cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory on device for the cost matrix
cm.C = 0;
cudaMalloc((void**)&cm.C, h * w * dmax * sizeof(float));
if (cm.C == 0)
{
std::cerr << "Failed to allocate memory for initial cost matrix!" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(4, 4, 32);
dim3 grid_size(0, 0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / (block_size.y);
grid_size.z = (dmax + block_size.z - 1) / (block_size.z);
// call kernel on GPU
initial_costs<<<grid_size, block_size>>>(cm);
return 0;
}
template<int numTestDirections>
int getPenaltyValues(float ** P1, float ** P2)
{
// use values given in paper
float h_P1[numTestDirections] = { 22.02, 22.02, 17.75, 17.75, 14.93, 14.93, 10.67, 10.67 };
float h_P2[numTestDirections] = { 82.79, 82.79, 80.87, 80.87, 23.30, 23.30, 28.80, 28.80 };
// copy values to device
*P1 = 0;
*P2 = 0;
cudaMalloc((void**)P1, numTestDirections * sizeof(float));
cudaMalloc((void**)P2, numTestDirections * sizeof(float));
if (*P1 == 0 || *P2 == 0)
{
std::cerr << "Failed to allocate memory for penalty values" << std::endl;
return 1;
}
cudaMemcpy(*P1, h_P1, numTestDirections * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(*P2, h_P2, numTestDirections * sizeof(float), cudaMemcpyHostToDevice);
return 0;
}
// Directional cost aggregation: each thread block traverses the image along one of
// the eight scan directions (dir = blockIdx.x) and accumulates path costs into cm.E[dir].
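// At each step along a path r the kernel applies the standard SGM recurrence
//   E_dir(p,d) = C(p,d) + min( E_dir(p-r,d),
//                              E_dir(p-r,d-1) + P1, E_dir(p-r,d+1) + P1,
//                              min_i E_dir(p-r,i) + P2 )
// Note: the usual normalization (subtracting min_k E_dir(p-r,k)) is omitted here,
// so accumulated costs grow along each path.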
__global__ void path_traversal(CostMatrices cm, float * P1, float * P2)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int d = threadIdx.x; // thread id corresponds to d value
const int il = threadIdx.y;
const int dir = blockIdx.x;
int dx = 0;
int dy = 0;
int x0 = 0;
int y0 = 0;
int x_ = 0;
int y_ = 0;
int maxItr = 0;
// each block works on a different direction
switch (dir) {
// HORIZONTAL FORWARD DIRECTION
case 0:
// (forward direction)
dx = 1;
x0 = 0;
// do edge case
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
// E(x,y,d)
for (int x = x0 + dx; x < w; x += dx) {
// wait for threads to sync up
__syncthreads();
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + (y * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + (y * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + (y * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// HORIZONTAL REVERSE DIRECTION
case 1:
// (reverse direction)
dx = -1;
x0 = w - 1;
// do edge case
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
// E(x,y,d)
for (int x = x0 + dx; x >= 0; x += dx) {
// wait for threads to sync up
__syncthreads();
for (int y = il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + (y * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + (y * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + (y * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + (y * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// VERTICAL BOTTOM->TOP
case 2:
// (forward direction)
dy = 1;
y0 = 0;
// do edge case
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// E(x,y,d)
for (int y = y0 + dy; y < h; y += dy) {
// wait for threads to sync up
__syncthreads();
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// VERTICAL TOP->BOTTOM
case 3:
// (reverse direction)
dy = -1;
y0 = h - 1;
// do edge case
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// E(x,y,d)
for (int y = y0 + dy; y >= 0; y += dy) {
// wait for threads to sync up
__syncthreads();
for (int x = il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL TOPLEFT->BOTTOMRIGHT
case 4:
// top left -> bottom right
dx = 1;
dy = -1;
x0 = 0;
y0 = h - 1;
// do top row edge case
for (int x = x0+il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do first col edge case
for (int y = y0-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_+il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL BOTTOMRIGHT->TOPLEFT
case 5:
// bottom right -> top left
dx = -1;
dy = 1;
x0 = w - 1;
y0 = 0;
// do bottom row edge case
for (int x = x0-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do last col edge case
for (int y = y0+il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_+il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL BOTTOMLEFT->TOPRIGHT
case 6:
// bottom left -> top right
dx = 1;
dy = 1;
x0 = 0;
y0 = 0;
// do row edge case
for (int x = x0+il; x < w; x=x+NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do col edge case
for (int y = y0+il; y < h; y=y+NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_+il; x < w; x=x+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_+il; y < h; y=y+NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
// DIAGONAL TOPRIGHT->BOTTOMLEFT
case 7:
// top right -> bottom left
dx = -1;
dy = -1;
x0 = w - 1;
y0 = h - 1;
// do row edge case
for (int x = x0-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
cm.E[dir][(x + (y0 * w)) * dmax + d] = cm.C[(x + (y0 * w)) * dmax + d];
}
// do col edge case
for (int y = y0-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
cm.E[dir][(x0 + (y * w)) * dmax + d] = cm.C[(x0 + (y * w)) * dmax + d];
}
maxItr = (w >= h) ? h : w;
y_ = y0;
x_ = x0;
for (int itr = 1; itr < maxItr; itr++) {
// wait for threads to sync up
__syncthreads();
// increment starting point
x_ += dx;
y_ += dy;
// iterate over current row
int y = y_;
for (int x = x_-il; x >= 0; x=x-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
// iterate over current col
int x = x_;
for (int y = y_-il; y >= 0; y=y-NUM_PARALLEL_THREADS) {
float term1 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d];
// handle d edge cases
float term2 = (d == 0) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d - 1] + P1[dir];
float term3 = (d == dmax - 1) ? term1 : cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + d + 1] + P1[dir];
// get minimum of all last d values
float term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax] + P2[dir];
for (int i = 1; i < dmax; i++) {
float test_term4 = cm.E[dir][(x - dx + ((y - dy) * w)) * dmax + i] + P2[dir];
if (test_term4 < term4)
term4 = test_term4;
}
// get minimum over the minimization terms
float minVal = fminf(term1, fminf(term2, fminf(term3, term4)));
// calculate E value
cm.E[dir][(x + (y * w)) * dmax + d] = cm.C[(x + (y * w)) * dmax + d] + minVal;
}
}
break;
}
}
template<int numTestDirections>
int doPathTraversal(CostMatrices& cm, float * P1, float * P2)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory
for (int i = 0; i < numTestDirections; i++) {
cm.E[i] = 0;
cudaMalloc((void**)&cm.E[i], h * w * dmax * sizeof(float));
if (cm.E[i] == 0) {
std::cerr << "ERROR: E[" << i << "] failed to allocate memory" << std::endl;
return 1;
}
}
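// Launch shape: one block per scan direction; threadIdx.x enumerates the dmax
// disparity hypotheses and threadIdx.y the NUM_PARALLEL_THREADS concurrent scan
// lines, so dmax * NUM_PARALLEL_THREADS must stay within the device's max threads per block.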
dim3 block_size(dmax,NUM_PARALLEL_THREADS);
dim3 grid_size(numTestDirections);
path_traversal <<<grid_size, block_size >>>(cm, P1, P2);
// can now free initial costs matrix
cudaFree(cm.C);
return 0;
}
// get weightings for each direction
template<int numTestDirections>
int getDirectionWeightings(float ** d_weights)
{
// for now we'll just use 1/numdirections
// set values on host
//float h_weights[numTestDirections] = { 0.01, 0.01, 0.01, 0.01, 0.9f, 0.9f, 0.9f, 0.9f };
//float h_weights[numTestDirections] = { 0.96f, 0.96f, 0.98f, 0.98f, 0.06f, 0.06f, 0.27f, 0.27f };
float h_weights[numTestDirections];
std::fill_n(h_weights, numTestDirections, 1.f / (float)numTestDirections);
// copy values to device
*d_weights = 0;
cudaMalloc((void**)d_weights, numTestDirections * sizeof(float));
if (*d_weights == 0)
{
std::cerr << "Failed to allocate memory for direction weights" << std::endl;
return 1;
}
cudaMemcpy(*d_weights, h_weights, numTestDirections * sizeof(float), cudaMemcpyHostToDevice);
return 0;
}
__global__ void sum_energy_matrices(const CostMatrices cm, float * weights, int numMatrices = 8)
{
// each thread performs sum for all d values at a given x,y
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int w = cm.width;
int h = cm.height;
int dmax = cm.depth;
float elementSum;
if (x < w && y < h){
for (int d = 0; d < dmax; d++) {
elementSum = 0;
for (int i = 0; i < numMatrices; i++) {
elementSum += weights[i] * cm.E[i][(x + (y * w)) * dmax + d];
}
cm.S[(x + (y * w)) * dmax + d] = elementSum;
}
}
}
template<int numTestDirections>
int getFinalCosts(CostMatrices& cm, float * d_weights)
{
int h = cm.height;
int w = cm.width;
int dmax = cm.depth;
// allocate memory
cm.S = 0;
cudaMalloc((void**)&cm.S, h * w * dmax * sizeof(float));
if (cm.S == 0) {
std::cerr << "ERROR: S[] failed to allocate memory" << std::endl;
return 1;
}
dim3 block_size(32,32);
dim3 grid_size(0,0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
sum_energy_matrices << <grid_size, block_size >> >(cm, d_weights, numTestDirections);
// can now free energy matrices
for (int i = 0; i < numTestDirections; i++) {
cudaFree(cm.E[i]);
}
return 0;
}
__global__ void find_minima(const CostMatrices cm, float * D)
{
// each thread finds the minimum over all d values at a given x,y
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
int w = cm.width;
int h = cm.height;
int dmax = cm.depth;
int mind;
float mindval;
if (x < w && y < h) {
mind = 0;
mindval = cm.S[(x + (y * w)) * dmax];
for (int d = 1; d < dmax; d++) {
float test_dval = cm.S[(x + (y * w)) * dmax + d];
if (test_dval < mindval) {
mindval = test_dval;
mind = d;
}
}
D[x + (y * w)] = mind;
}
}
int getDisparities(const CostMatrices cm, float ** D)
{
int h = cm.height;
int w = cm.width;
// allocate memory
*D = 0;
cudaMalloc((void**)D, h * w * sizeof(float));
if (*D == 0) {
std::cerr << "ERROR: D[] failed to allocate memory" << std::endl;
return 1;
}
dim3 block_size(32, 32);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
find_minima << <grid_size, block_size >> >(cm, *D);
// can now free final cost matrix
cudaFree(cm.S);
return 0;
}
// compute initial costs where base and match (left and right) images are reversed
__global__ void initial_costs_reverse(CostMatrices cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
const int d = blockIdx.z * blockDim.z + threadIdx.z;
// compute normalized coords
float u = x / (float)w;
float v = y / (float)h;
float offset = d / (float)w;
// fetch values from texture
float baseVal = tex2D(mTex, u, v);
float matchVal = tex2D(bTex, u + offset, v);
// compute C(x,y,d) and write to matrix
if (x < w && y < h && d < dmax)
cm.C[(x + (y * w)) * dmax + d] = fabs(baseVal - matchVal);
}
int getInitialCosts_reverse(CostMatrices& cm)
{
const int h = cm.height;
const int w = cm.width;
const int dmax = cm.depth;
// allocate memory on device for the cost matrix
cm.C = 0;
cudaMalloc((void**)&cm.C, h * w * dmax * sizeof(float));
if (cm.C == 0)
{
std::cerr << "Failed to allocate memory for initial cost matrix!" << std::endl;
return -1;
}
// define block and grid sizes
dim3 block_size(4, 4, 32);
dim3 grid_size(0, 0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / (block_size.y);
grid_size.z = (dmax + block_size.z - 1) / (block_size.z);
// call kernel on GPU
initial_costs_reverse << <grid_size, block_size >> >(cm);
return 0;
}
// detect occlusion areas and set to zero
template<int pxPerThread>
__global__ void refine_dmap(float * D_base, const float * D_ref, int h, int w)
{
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = pxPerThread * (blockIdx.y*blockDim.y + threadIdx.y);
const int tolerance = 3; // set some tolerance (px)
for (int i = 0; i < pxPerThread; i++) {
if (in_img(x, y + i, h, w)) {
int baseVal = D_base[x + ((y + i) * w)];
int matchVal;
if (x - baseVal >= 0) matchVal = D_ref[x - baseVal + ((y + i) * w)]; // guard matches the x - d read below
else continue;
if (abs(baseVal - matchVal) > tolerance)
D_base[x + ((y + i) * w)] = 0;
}
}
}
// refines base disparity map (writes to base map)
template<int pxPerThread>
void refineDisparityMap(float * D_base, float * D_ref, int h, int w)
{
dim3 block_size(32, 8);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + pxPerThread * block_size.y - 1) / (pxPerThread * block_size.y);
refine_dmap<pxPerThread><<<grid_size, block_size>>>(D_base, D_ref, h, w);
// no longer need the match image disparity map
cudaFree(D_ref);
}
// applies median filter with 3x3 kernel to image
__global__ void median_filter_3x3(float * d_input_img, float * d_output_img, int h, int w)
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
float window[9];
if (!in_img(x,y,h,w))
return;
// get elements for kernel
window[0] = (y == 0 || x == 0) ? 0 : d_input_img[(y - 1)*w + (x - 1)];
window[1] = (y == 0) ? 0 : d_input_img[(y - 1)*w + x];
window[2] = (y == 0 || x == w - 1) ? 0 : d_input_img[(y - 1)*w + (x + 1)];
window[3] = (x == 0) ? 0 : d_input_img[y*w + (x - 1)];
window[4] = d_input_img[y*w + x];
window[5] = (x == w - 1) ? 0 : d_input_img[y*w + (x + 1)];
window[6] = (y == h - 1 || x == 0) ? 0 : d_input_img[(y + 1)*w + (x - 1)];
window[7] = (y == h - 1) ? 0 : d_input_img[(y + 1)*w + x];
window[8] = (y == h - 1 || x == w - 1) ? 0 : d_input_img[(y + 1)*w + (x + 1)];
// order elements
for (uint j = 0; j<5; ++j)
{
// find position of minimum element
float temp = window[j];
uint idx = j;
for (uint l = j + 1; l<9; ++l)
if (window[l] < temp){ idx = l; temp = window[l]; }
// put found minimum element in its place
window[idx] = window[j];
window[j] = temp;
}
// write median value
d_output_img[y*w + x] = window[4];
}
int doMedianFiltering(float ** image, int h, int w)
{
// allocate memory
float * result = 0;
cudaMalloc((void**)&result, h * w * sizeof(float));
if (result == 0) {
std::cerr << "ERROR: failed to allocate memory for median filtering" << std::endl;
return 1;
}
dim3 block_size(16, 32);
dim3 grid_size(0, 0);
grid_size.x = (w + block_size.x - 1) / block_size.x;
grid_size.y = (h + block_size.y - 1) / block_size.y;
median_filter_3x3 <<< grid_size, block_size >>> (*image, result, h, w);
// free original image and return filtered result
cudaFree(*image);
*image = result;
return 0;
}
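// ---------------------------------------------------------------------------
// Editor's illustrative sketch -- not part of the original kernel file. It
// shows one plausible way the helpers above could be chained to produce the
// disparity map for the match (right) image; the base-image map would be
// built the same way from the forward cost kernel before calling
// refineDisparityMap(). NUM_DIRS, the function name, and P1/P2 (per-direction
// penalty arrays already resident on the device) are assumptions.
// ---------------------------------------------------------------------------
template<int NUM_DIRS>
inline int computeMatchDisparitySketch(CostMatrices& cm, float * P1, float * P2,
                                       float ** D_match)
{
    if (getInitialCosts_reverse(cm) != 0) return 1;             // C(x,y,d)
    if (doPathTraversal<NUM_DIRS>(cm, P1, P2) != 0) return 1;   // E_r(x,y,d) per direction
    float * d_weights = 0;
    if (getDirectionWeightings<NUM_DIRS>(&d_weights) != 0) return 1;
    if (getFinalCosts<NUM_DIRS>(cm, d_weights) != 0) return 1;  // weighted sum S(x,y,d)
    if (getDisparities(cm, D_match) != 0) return 1;             // argmin over d per pixel
    cudaFree(d_weights);
    return doMedianFiltering(D_match, cm.height, cm.width);     // 3x3 median cleanup
}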
#endif // #ifndef _SEMIGLOBALMATCHING_KERNEL_CU_
|
4fe043bdb391e57dedf1f950452b63af62c541cb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp > +0.0f - var_1) {
comp += -1.5893E34f * (var_4 * +1.4138E34f);
for (int i=0; i < var_2; ++i) {
comp = var_5 + -1.6156E36f / var_6;
}
for (int i=0; i < var_3; ++i) {
comp = -0.0f - fmodf((-1.7388E-42f * (var_7 / var_8 / (+1.8761E-17f - +1.1425E-35f))), var_9 / +0.0f / (-1.2067E35f - (-1.0432E-36f * var_10)));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
hipDeviceSynchronize();
return 0;
}
| 4fe043bdb391e57dedf1f950452b63af62c541cb.cu |
/* This is an automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,int var_2,int var_3,float var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10) {
if (comp > +0.0f - var_1) {
comp += -1.5893E34f * (var_4 * +1.4138E34f);
for (int i=0; i < var_2; ++i) {
comp = var_5 + -1.6156E36f / var_6;
}
for (int i=0; i < var_3; ++i) {
comp = -0.0f - fmodf((-1.7388E-42f * (var_7 / var_8 / (+1.8761E-17f - +1.1425E-35f))), var_9 / +0.0f / (-1.2067E35f - (-1.0432E-36f * var_10)));
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
int tmp_3 = atoi(argv[3]);
int tmp_4 = atoi(argv[4]);
float tmp_5 = atof(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11);
cudaDeviceSynchronize();
return 0;
}
|
c33a676227968dd462af17d0f7ff349ab6e53190.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
*
* DeepLearningNetwork.cu
*
* Code generation for function 'DeepLearningNetwork'
*
*/
/* Include files */
#include "rt_nonfinite.h"
#include "alexnet_predict.h"
#include "DeepLearningNetwork.h"
/* Type Definitions */
#include "cnn_api.hpp"
/* Function Declarations */
static __global__ void c_DeepLearningNetwork_predict_k(const real_T *inputdata,
real32_T *b_inputdata);
static __global__ void d_DeepLearningNetwork_predict_k(real32_T *inputdata,
real32_T *inputT);
static __global__ void e_DeepLearningNetwork_predict_k(real32_T *out, real32_T
*outT);
/* Function Definitions */
static __global__ __launch_bounds__(512, 1) void c_DeepLearningNetwork_predict_k
(const real_T *inputdata, real32_T *b_inputdata)
{
int32_T i0;
;
;
i0 = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(i0 >= 154587)) {
b_inputdata[i0] = (real32_T)inputdata[i0];
}
}
static __global__ __launch_bounds__(512, 1) void d_DeepLearningNetwork_predict_k
(real32_T *inputdata, real32_T *inputT)
{
uint32_T threadId;
int32_T i0;
int32_T i1;
int32_T p;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
p = (int32_T)(threadId / 51529U);
i1 = (int32_T)((threadId - (uint32_T)p * 51529U) / 227U);
i0 = (int32_T)((threadId - (uint32_T)i1 * 227U) - (uint32_T)p * 51529U);
if (((int32_T)((!(int32_T)(i0 >= 227)) && (!(int32_T)(i1 >= 227)))) &&
(!(int32_T)(p >= 3))) {
inputT[(i0 + 227 * i1) + 51529 * p] = inputdata[(i1 + 227 * i0) + 51529 * p];
}
}
static __global__ __launch_bounds__(512, 1) void e_DeepLearningNetwork_predict_k
(real32_T *out, real32_T *outT)
{
int32_T i0;
;
;
i0 = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(i0 >= 1000)) {
outT[i0] = out[i0];
}
}
void DeepLearningNetwork_predict(b_alexnet *obj, const real_T inputdata[154587],
real32_T outT[1000])
{
real32_T *gpu_inputT;
real32_T *gpu_out;
real_T *gpu_inputdata;
real32_T *b_gpu_inputdata;
real32_T *gpu_outT;
hipMalloc(&gpu_outT, 4000ULL);
hipMalloc(&gpu_out, 4000ULL);
hipMalloc(&gpu_inputT, 618348ULL);
hipMalloc(&b_gpu_inputdata, 618348ULL);
hipMalloc(&gpu_inputdata, 1236696ULL);
hipMemcpy((void *)gpu_inputdata, (void *)&inputdata[0], 1236696ULL,
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( c_DeepLearningNetwork_predict_k), dim3(dim3(302U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_inputdata, b_gpu_inputdata);
hipLaunchKernelGGL(( d_DeepLearningNetwork_predict_k), dim3(dim3(302U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
b_gpu_inputdata, gpu_inputT);
hipMemcpy(obj->inputData, gpu_inputT, 154587ULL * sizeof(real32_T),
hipMemcpyDeviceToDevice);
obj->predict();
hipMemcpy(gpu_out, obj->outputData, 1000ULL * sizeof(real32_T),
hipMemcpyDeviceToDevice);
hipLaunchKernelGGL(( e_DeepLearningNetwork_predict_k), dim3(dim3(2U, 1U, 1U)), dim3(dim3(512U, 1U, 1U)), 0, 0,
gpu_out, gpu_outT);
hipMemcpy((void *)&outT[0], (void *)gpu_outT, 4000ULL, hipMemcpyDeviceToHost);
hipFree(gpu_inputdata);
hipFree(b_gpu_inputdata);
hipFree(gpu_inputT);
hipFree(gpu_out);
hipFree(gpu_outT);
}
void DeepLearningNetwork_setup(b_alexnet *obj)
{
obj->setup();
obj->batchSize = 1;
}
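/* Editor's illustrative sketch -- not part of the generated code. It shows the
   intended call order for the generated API: construct the network, run setup
   once, then call predict per image. The function name and the "image"/"scores"
   buffers are hypothetical; the 154587-element input is the 227x227x3 image in
   the layout alexnet_predict expects, and loading/normalising it is out of
   scope here. */
inline void example_alexnet_classify(const real_T image[154587],
                                     real32_T scores[1000])
{
  b_alexnet net;                      /* allocates the 25 layer objects */
  DeepLearningNetwork_setup(&net);    /* creates BLAS/cuDNN handles, loads weights */
  DeepLearningNetwork_predict(&net, image, scores);
  /* net's destructor calls cleanup(), releasing layers, workspace and handles */
}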
b_alexnet::b_alexnet()
{
this->numLayers = 25;
this->cublasHandle = 0;
this->cudnnHandle = 0;
this->workSpace = 0;
this->layers[0] = new MWInputLayer;
this->layers[1] = new MWConvLayer;
this->layers[2] = new MWReLULayer;
this->layers[3] = new MWNormLayer;
this->layers[4] = new MWMaxPoolingLayer;
this->layers[5] = new MWConvLayer;
this->layers[6] = new MWReLULayer;
this->layers[7] = new MWNormLayer;
this->layers[8] = new MWMaxPoolingLayer;
this->layers[9] = new MWConvLayer;
this->layers[10] = new MWReLULayer;
this->layers[11] = new MWConvLayer;
this->layers[12] = new MWReLULayer;
this->layers[13] = new MWConvLayer;
this->layers[14] = new MWReLULayer;
this->layers[15] = new MWMaxPoolingLayer;
this->layers[16] = new MWFCLayer;
this->layers[17] = new MWReLULayer;
this->layers[18] = new MWPassthroughLayer;
this->layers[19] = new MWFCLayer;
this->layers[20] = new MWReLULayer;
this->layers[21] = new MWPassthroughLayer;
this->layers[22] = new MWFCLayer;
this->layers[23] = new MWSoftmaxLayer;
this->layers[24] = new MWOutputLayer;
}
b_alexnet::~b_alexnet()
{
int32_T idx;
this->cleanup();
for (idx = 0; idx < 25; idx++) {
delete this->layers[idx];
}
}
void b_alexnet::cleanup()
{
int32_T idx;
for (idx = 0; idx < 25; idx++) {
this->layers[idx]->cleanup();
}
if (this->workSpace) {
hipFree(this->workSpace);
}
if (this->cublasHandle) {
hipblasDestroy(*this->cublasHandle);
}
if (this->cudnnHandle) {
cudnnDestroy(*this->cudnnHandle);
}
}
void b_alexnet::predict()
{
int32_T idx;
for (idx = 0; idx < 25; idx++) {
this->layers[idx]->predict();
}
}
void b_alexnet::setup()
{
int32_T idx_handles;
int32_T idx_ws;
this->cublasHandle = new hipblasHandle_t;
hipblasCreate(this->cublasHandle);
this->cudnnHandle = new cudnnHandle_t;
cudnnCreate(this->cudnnHandle);
for (idx_handles = 0; idx_handles < 25; idx_handles++) {
this->layers[idx_handles]->setCublasHandle(this->cublasHandle);
this->layers[idx_handles]->setCudnnHandle(this->cudnnHandle);
}
this->layers[0]->createInputLayer(1, 227, 227, 3, 1);
this->layers[0]->loadAvg(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_avg");
this->layers[1]->createConvLayer(this->layers[0], 11, 11, 3, 96, 4, 4, 0, 0, 1);
this->layers[1]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv1_w");
this->layers[1]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv1_b");
this->layers[2]->createReLULayer(this->layers[1]);
this->layers[3]->createNormLayer(this->layers[2], 5, 0.0001, 0.75, 1.0);
this->layers[4]->createMaxPoolingLayer(this->layers[3], 3, 3, 2, 2, 0, 0);
this->layers[5]->createConvLayer(this->layers[4], 5, 5, 48, 128, 1, 1, 2, 2, 2);
this->layers[5]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv2_w");
this->layers[5]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv2_b");
this->layers[6]->createReLULayer(this->layers[5]);
this->layers[7]->createNormLayer(this->layers[6], 5, 0.0001, 0.75, 1.0);
this->layers[8]->createMaxPoolingLayer(this->layers[7], 3, 3, 2, 2, 0, 0);
this->layers[9]->createConvLayer(this->layers[8], 3, 3, 256, 384, 1, 1, 1, 1,
1);
this->layers[9]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv3_w");
this->layers[9]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv3_b");
this->layers[10]->createReLULayer(this->layers[9]);
this->layers[11]->createConvLayer(this->layers[10], 3, 3, 192, 192, 1, 1, 1, 1,
2);
this->layers[11]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv4_w");
this->layers[11]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv4_b");
this->layers[12]->createReLULayer(this->layers[11]);
this->layers[13]->createConvLayer(this->layers[12], 3, 3, 192, 128, 1, 1, 1, 1,
2);
this->layers[13]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv5_w");
this->layers[13]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv5_b");
this->layers[14]->createReLULayer(this->layers[13]);
this->layers[15]->createMaxPoolingLayer(this->layers[14], 3, 3, 2, 2, 0, 0);
this->layers[16]->createFCLayer(this->layers[15], 9216, 4096);
this->layers[16]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc6_w");
this->layers[16]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc6_b");
this->layers[17]->createReLULayer(this->layers[16]);
this->layers[18]->createPassthroughLayer(this->layers[17]);
this->layers[19]->createFCLayer(this->layers[18], 4096, 4096);
this->layers[19]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc7_w");
this->layers[19]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc7_b");
this->layers[20]->createReLULayer(this->layers[19]);
this->layers[21]->createPassthroughLayer(this->layers[20]);
this->layers[22]->createFCLayer(this->layers[21], 4096, 1000);
this->layers[22]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc8_w");
this->layers[22]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc8_b");
this->layers[23]->createSoftmaxLayer(this->layers[22]);
this->layers[24]->createOutputLayer(this->layers[23]);
this->layers[24]->createWorkSpace((&this->workSpace));
for (idx_ws = 0; idx_ws < 25; idx_ws++) {
this->layers[idx_ws]->setWorkSpace(this->workSpace);
}
this->inputData = this->layers[0]->getData();
this->outputData = this->layers[24]->getData();
}
/* End of code generation (DeepLearningNetwork.cu) */
| c33a676227968dd462af17d0f7ff349ab6e53190.cu | /*
* Academic License - for use in teaching, academic research, and meeting
* course requirements at degree granting institutions only. Not for
* government, commercial, or other organizational use.
*
* DeepLearningNetwork.cu
*
* Code generation for function 'DeepLearningNetwork'
*
*/
/* Include files */
#include "rt_nonfinite.h"
#include "alexnet_predict.h"
#include "DeepLearningNetwork.h"
/* Type Definitions */
#include "cnn_api.hpp"
/* Function Declarations */
static __global__ void c_DeepLearningNetwork_predict_k(const real_T *inputdata,
real32_T *b_inputdata);
static __global__ void d_DeepLearningNetwork_predict_k(real32_T *inputdata,
real32_T *inputT);
static __global__ void e_DeepLearningNetwork_predict_k(real32_T *out, real32_T
*outT);
/* Function Definitions */
static __global__ __launch_bounds__(512, 1) void c_DeepLearningNetwork_predict_k
(const real_T *inputdata, real32_T *b_inputdata)
{
int32_T i0;
;
;
i0 = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(i0 >= 154587)) {
b_inputdata[i0] = (real32_T)inputdata[i0];
}
}
static __global__ __launch_bounds__(512, 1) void d_DeepLearningNetwork_predict_k
(real32_T *inputdata, real32_T *inputT)
{
uint32_T threadId;
int32_T i0;
int32_T i1;
int32_T p;
;
;
threadId = ((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y) +
blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y * blockDim.x)
+ threadIdx.x;
p = (int32_T)(threadId / 51529U);
i1 = (int32_T)((threadId - (uint32_T)p * 51529U) / 227U);
i0 = (int32_T)((threadId - (uint32_T)i1 * 227U) - (uint32_T)p * 51529U);
if (((int32_T)((!(int32_T)(i0 >= 227)) && (!(int32_T)(i1 >= 227)))) &&
(!(int32_T)(p >= 3))) {
inputT[(i0 + 227 * i1) + 51529 * p] = inputdata[(i1 + 227 * i0) + 51529 * p];
}
}
static __global__ __launch_bounds__(512, 1) void e_DeepLearningNetwork_predict_k
(real32_T *out, real32_T *outT)
{
int32_T i0;
;
;
i0 = (int32_T)(((((gridDim.x * gridDim.y * blockIdx.z + gridDim.x * blockIdx.y)
+ blockIdx.x) * (blockDim.x * blockDim.y * blockDim.z) +
threadIdx.z * blockDim.x * blockDim.y) + threadIdx.y *
blockDim.x) + threadIdx.x);
if (!(int32_T)(i0 >= 1000)) {
outT[i0] = out[i0];
}
}
void DeepLearningNetwork_predict(b_alexnet *obj, const real_T inputdata[154587],
real32_T outT[1000])
{
real32_T *gpu_inputT;
real32_T *gpu_out;
real_T *gpu_inputdata;
real32_T *b_gpu_inputdata;
real32_T *gpu_outT;
cudaMalloc(&gpu_outT, 4000ULL);
cudaMalloc(&gpu_out, 4000ULL);
cudaMalloc(&gpu_inputT, 618348ULL);
cudaMalloc(&b_gpu_inputdata, 618348ULL);
cudaMalloc(&gpu_inputdata, 1236696ULL);
cudaMemcpy((void *)gpu_inputdata, (void *)&inputdata[0], 1236696ULL,
cudaMemcpyHostToDevice);
c_DeepLearningNetwork_predict_k<<<dim3(302U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_inputdata, b_gpu_inputdata);
d_DeepLearningNetwork_predict_k<<<dim3(302U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(b_gpu_inputdata, gpu_inputT);
cudaMemcpy(obj->inputData, gpu_inputT, 154587ULL * sizeof(real32_T),
cudaMemcpyDeviceToDevice);
obj->predict();
cudaMemcpy(gpu_out, obj->outputData, 1000ULL * sizeof(real32_T),
cudaMemcpyDeviceToDevice);
e_DeepLearningNetwork_predict_k<<<dim3(2U, 1U, 1U), dim3(512U, 1U, 1U)>>>
(gpu_out, gpu_outT);
cudaMemcpy((void *)&outT[0], (void *)gpu_outT, 4000ULL, cudaMemcpyDeviceToHost);
cudaFree(gpu_inputdata);
cudaFree(b_gpu_inputdata);
cudaFree(gpu_inputT);
cudaFree(gpu_out);
cudaFree(gpu_outT);
}
void DeepLearningNetwork_setup(b_alexnet *obj)
{
obj->setup();
obj->batchSize = 1;
}
b_alexnet::b_alexnet()
{
this->numLayers = 25;
this->cublasHandle = 0;
this->cudnnHandle = 0;
this->workSpace = 0;
this->layers[0] = new MWInputLayer;
this->layers[1] = new MWConvLayer;
this->layers[2] = new MWReLULayer;
this->layers[3] = new MWNormLayer;
this->layers[4] = new MWMaxPoolingLayer;
this->layers[5] = new MWConvLayer;
this->layers[6] = new MWReLULayer;
this->layers[7] = new MWNormLayer;
this->layers[8] = new MWMaxPoolingLayer;
this->layers[9] = new MWConvLayer;
this->layers[10] = new MWReLULayer;
this->layers[11] = new MWConvLayer;
this->layers[12] = new MWReLULayer;
this->layers[13] = new MWConvLayer;
this->layers[14] = new MWReLULayer;
this->layers[15] = new MWMaxPoolingLayer;
this->layers[16] = new MWFCLayer;
this->layers[17] = new MWReLULayer;
this->layers[18] = new MWPassthroughLayer;
this->layers[19] = new MWFCLayer;
this->layers[20] = new MWReLULayer;
this->layers[21] = new MWPassthroughLayer;
this->layers[22] = new MWFCLayer;
this->layers[23] = new MWSoftmaxLayer;
this->layers[24] = new MWOutputLayer;
}
b_alexnet::~b_alexnet()
{
int32_T idx;
this->cleanup();
for (idx = 0; idx < 25; idx++) {
delete this->layers[idx];
}
}
void b_alexnet::cleanup()
{
int32_T idx;
for (idx = 0; idx < 25; idx++) {
this->layers[idx]->cleanup();
}
if (this->workSpace) {
cudaFree(this->workSpace);
}
if (this->cublasHandle) {
cublasDestroy(*this->cublasHandle);
}
if (this->cudnnHandle) {
cudnnDestroy(*this->cudnnHandle);
}
}
void b_alexnet::predict()
{
int32_T idx;
for (idx = 0; idx < 25; idx++) {
this->layers[idx]->predict();
}
}
void b_alexnet::setup()
{
int32_T idx_handles;
int32_T idx_ws;
this->cublasHandle = new cublasHandle_t;
cublasCreate(this->cublasHandle);
this->cudnnHandle = new cudnnHandle_t;
cudnnCreate(this->cudnnHandle);
for (idx_handles = 0; idx_handles < 25; idx_handles++) {
this->layers[idx_handles]->setCublasHandle(this->cublasHandle);
this->layers[idx_handles]->setCudnnHandle(this->cudnnHandle);
}
this->layers[0]->createInputLayer(1, 227, 227, 3, 1);
this->layers[0]->loadAvg(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_avg");
this->layers[1]->createConvLayer(this->layers[0], 11, 11, 3, 96, 4, 4, 0, 0, 1);
this->layers[1]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv1_w");
this->layers[1]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv1_b");
this->layers[2]->createReLULayer(this->layers[1]);
this->layers[3]->createNormLayer(this->layers[2], 5, 0.0001, 0.75, 1.0);
this->layers[4]->createMaxPoolingLayer(this->layers[3], 3, 3, 2, 2, 0, 0);
this->layers[5]->createConvLayer(this->layers[4], 5, 5, 48, 128, 1, 1, 2, 2, 2);
this->layers[5]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv2_w");
this->layers[5]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv2_b");
this->layers[6]->createReLULayer(this->layers[5]);
this->layers[7]->createNormLayer(this->layers[6], 5, 0.0001, 0.75, 1.0);
this->layers[8]->createMaxPoolingLayer(this->layers[7], 3, 3, 2, 2, 0, 0);
this->layers[9]->createConvLayer(this->layers[8], 3, 3, 256, 384, 1, 1, 1, 1,
1);
this->layers[9]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv3_w");
this->layers[9]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv3_b");
this->layers[10]->createReLULayer(this->layers[9]);
this->layers[11]->createConvLayer(this->layers[10], 3, 3, 192, 192, 1, 1, 1, 1,
2);
this->layers[11]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv4_w");
this->layers[11]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv4_b");
this->layers[12]->createReLULayer(this->layers[11]);
this->layers[13]->createConvLayer(this->layers[12], 3, 3, 192, 128, 1, 1, 1, 1,
2);
this->layers[13]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv5_w");
this->layers[13]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_conv5_b");
this->layers[14]->createReLULayer(this->layers[13]);
this->layers[15]->createMaxPoolingLayer(this->layers[14], 3, 3, 2, 2, 0, 0);
this->layers[16]->createFCLayer(this->layers[15], 9216, 4096);
this->layers[16]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc6_w");
this->layers[16]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc6_b");
this->layers[17]->createReLULayer(this->layers[16]);
this->layers[18]->createPassthroughLayer(this->layers[17]);
this->layers[19]->createFCLayer(this->layers[18], 4096, 4096);
this->layers[19]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc7_w");
this->layers[19]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc7_b");
this->layers[20]->createReLULayer(this->layers[19]);
this->layers[21]->createPassthroughLayer(this->layers[20]);
this->layers[22]->createFCLayer(this->layers[21], 4096, 1000);
this->layers[22]->loadWeights(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc8_w");
this->layers[22]->loadBias(
"C:\\Sumpurn\\gpucoderdemo_alexnet_live\\codegen\\mex\\alexnet_predict\\cnn_alexnet_fc8_b");
this->layers[23]->createSoftmaxLayer(this->layers[22]);
this->layers[24]->createOutputLayer(this->layers[23]);
this->layers[24]->createWorkSpace((&this->workSpace));
for (idx_ws = 0; idx_ws < 25; idx_ws++) {
this->layers[idx_ws]->setWorkSpace(this->workSpace);
}
this->inputData = this->layers[0]->getData();
this->outputData = this->layers[24]->getData();
}
/* End of code generation (DeepLearningNetwork.cu) */
|
59873c9f2a29f7d251fe1c4d7551e14e830179c8.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include "tex.h"
template<typename T>
void TexVec<T>::Free(){
if(cuArray){
hipFreeArray((hipArray*)cuArray);
}
if(texObj){
hipDestroyTextureObject(*(hipTextureObject_t*)texObj);
free(texObj);
}
}
template void TexVec<float>::Free();
template void TexVec<double>::Free();
template<typename T>
void TexVec<T>::SetTexVec(int m,T* data){
}
template<> void TexVec<float>::SetTexVec(int m,float* data){
hipError_t err;
Free();
texObj = NULL;
cuArray = NULL;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,0,0,0,hipChannelFormatKindFloat);
int size = m * sizeof(float);
/*
int width = m;
int height = 1;
err = hipMallocArray(((hipArray**)(&cuArray)),&channelDesc,width,height);
if(err != hipSuccess){ fprintf(stderr,"hipMallocArray failed; size=%d\n",m);return; }
err = hipMemcpyToArray((hipArray*)cuArray,0,0,data,size,hipMemcpyHostToDevice);
if(err != hipSuccess){ fprintf(stderr,"hipMemcpyToArray failed\n");return; }
*/
struct hipResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
// resDesc.resType = hipResourceTypeArray;
// resDesc.res.array.array = (hipArray*)cuArray;
resDesc.resType = hipResourceTypeLinear;
resDesc.res.linear.devPtr = (void*)data;
resDesc.res.linear.desc = channelDesc;
resDesc.res.linear.sizeInBytes = size;
struct hipTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.addressMode[0] = hipAddressModeWrap;
texDesc.addressMode[1] = hipAddressModeWrap;
texDesc.filterMode = hipFilterModePoint;
texDesc.readMode = hipReadModeElementType;
texDesc.normalizedCoords = 0;
texObj = malloc(sizeof(hipTextureObject_t));
err = hipCreateTextureObject((hipTextureObject_t*)texObj,&resDesc,&texDesc,NULL);
if(err != hipSuccess){ fprintf(stderr,"hipCreateTextureObject failed\n");return; }
}
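// Editor's illustrative sketch -- not part of the original file. A kernel that
// reads the vector back through the texture object built above; for a texture
// bound to linear memory the elements are fetched by integer index with
// tex1Dfetch(), which HIP provides like CUDA. The kernel name and the
// "out"/"m" parameters are hypothetical.
__global__ void copy_through_tex(hipTextureObject_t texObj, float* out, int m){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i < m)
    out[i] = tex1Dfetch<float>(texObj, i); // cached read of element i
}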
template<> void TexVec<double>::SetTexVec(int m,double* data){
Free();
hipChannelFormatDesc channelDesc = hipCreateChannelDesc(32,32,0,0,hipChannelFormatKindSigned);
}
| 59873c9f2a29f7d251fe1c4d7551e14e830179c8.cu | #include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include "tex.h"
template<typename T>
void TexVec<T>::Free(){
if(cuArray){
cudaFreeArray((cudaArray*)cuArray);
}
if(texObj){
cudaDestroyTextureObject(*(cudaTextureObject_t*)texObj);
free(texObj);
}
}
template void TexVec<float>::Free();
template void TexVec<double>::Free();
template<typename T>
void TexVec<T>::SetTexVec(int m,T* data){
}
template<> void TexVec<float>::SetTexVec(int m,float* data){
cudaError_t err;
Free();
texObj = NULL;
cuArray = NULL;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,0,0,0,cudaChannelFormatKindFloat);
int size = m * sizeof(float);
/*
int width = m;
int height = 1;
err = cudaMallocArray(((cudaArray**)(&cuArray)),&channelDesc,width,height);
if(err != cudaSuccess){ fprintf(stderr,"cudaMallocArray failed; size=%d\n",m);return; }
err = cudaMemcpyToArray((cudaArray*)cuArray,0,0,data,size,cudaMemcpyHostToDevice);
if(err != cudaSuccess){ fprintf(stderr,"cudaMemcpyToArray failed\n");return; }
*/
struct cudaResourceDesc resDesc;
memset(&resDesc,0,sizeof(resDesc));
// resDesc.resType = cudaResourceTypeArray;
// resDesc.res.array.array = (cudaArray*)cuArray;
resDesc.resType = cudaResourceTypeLinear;
resDesc.res.linear.devPtr = (void*)data;
resDesc.res.linear.desc = channelDesc;
resDesc.res.linear.sizeInBytes = size;
struct cudaTextureDesc texDesc;
memset(&texDesc,0,sizeof(texDesc));
texDesc.addressMode[0] = cudaAddressModeWrap;
texDesc.addressMode[1] = cudaAddressModeWrap;
texDesc.filterMode = cudaFilterModePoint;
texDesc.readMode = cudaReadModeElementType;
texDesc.normalizedCoords = 0;
texObj = malloc(sizeof(cudaTextureObject_t));
err = cudaCreateTextureObject((cudaTextureObject_t*)texObj,&resDesc,&texDesc,NULL);
if(err != cudaSuccess){ fprintf(stderr,"cudaCreateTextureObject failed\n");return; }
}
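// Editor's illustrative sketch -- not part of the original file. A kernel that
// reads the vector back through the texture object built above; for a
// cudaResourceTypeLinear texture the elements are fetched by integer index
// with tex1Dfetch(). The kernel name and the "out"/"m" parameters are
// hypothetical.
__global__ void copy_through_tex(cudaTextureObject_t texObj, float* out, int m){
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if(i < m)
    out[i] = tex1Dfetch<float>(texObj, i); // cached read of element i
}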
template<> void TexVec<double>::SetTexVec(int m,double* data){
Free();
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc(32,32,0,0,cudaChannelFormatKindSigned);
}
|
0e389b2474fb836a840f46f3472d180e47c5f425.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Author:
* Yixin Li, Email: [email protected]
* convert the image from LAB to RGB
*/
__global__ void lab_to_rgb( double * img, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
double L = img[3*t];
double La = img[3*t+1];
double Lb = img[3*t+2];
if (L!=L || La!=La || Lb!=Lb) return;
//convert from LAB to XYZ
double fy = (L+16) / 116;
double fx = La/500 + fy;
double fz = fy-Lb/200;
double x,y,z;
double xcube = pow(fx,3);
double ycube = pow(fy,3);
double zcube = pow(fz,3);
if (ycube>0.008856) y = ycube;
else y = (fy-16.0/116.0)/7.787;
if (xcube>0.008856) x = xcube;
else x = (fx - 16.0/116.0)/7.787;
if (zcube>0.008856) z = zcube;
else z = (fz - 16.0/116.0)/7.787;
double X = 0.950456 * x;
double Y = 1.000 * y;
double Z = 1.088754 * z;
//convert from XYZ to rgb
double R = X * 3.2406 + Y * -1.5372 + Z * -0.4986;
double G = X * -0.9689 + Y * 1.8758 + Z * 0.0415;
double B = X * 0.0557 + Y * -0.2040 + Z * 1.0570;
double r,g,b;
if (R>0.0031308) r = 1.055 * (pow(R,(1.0/2.4))) - 0.055;
else r = 12.92 * R;
if (G>0.0031308) g = 1.055 * ( pow(G,(1.0/2.4))) - 0.055;
else g= 12.92 * G;
if (B>0.0031308) b = 1.055 * (pow(B, (1.0/2.4))) - 0.055;
else b = 12.92 * B;
img[3*t] = min(255.0, r * 255.0);
img[3*t+1] = min(255.0, g * 255.0);
img[3*t+2] = min(255.0, b * 255.0);
} | 0e389b2474fb836a840f46f3472d180e47c5f425.cu | /*
* Author:
* Yixin Li, Email: [email protected]
* convert the image from LAB to RGB
*/
__global__ void lab_to_rgb( double * img, const int nPts) {
// getting the index of the pixel
const int t = threadIdx.x + blockIdx.x * blockDim.x;
if (t>=nPts) return;
double L = img[3*t];
double La = img[3*t+1];
double Lb = img[3*t+2];
if (L!=L || La!=La || Lb!=Lb) return;
//convert from LAB to XYZ
double fy = (L+16) / 116;
double fx = La/500 + fy;
double fz = fy-Lb/200;
double x,y,z;
double xcube = pow(fx,3);
double ycube = pow(fy,3);
double zcube = pow(fz,3);
if (ycube>0.008856) y = ycube;
else y = (fy-16.0/116.0)/7.787;
if (xcube>0.008856) x = xcube;
else x = (fx - 16.0/116.0)/7.787;
if (zcube>0.008856) z = zcube;
else z = (fz - 16.0/116.0)/7.787;
double X = 0.950456 * x;
double Y = 1.000 * y;
double Z = 1.088754 * z;
//convert from XYZ to rgb
double R = X * 3.2406 + Y * -1.5372 + Z * -0.4986;
double G = X * -0.9689 + Y * 1.8758 + Z * 0.0415;
double B = X * 0.0557 + Y * -0.2040 + Z * 1.0570;
double r,g,b;
if (R>0.0031308) r = 1.055 * (pow(R,(1.0/2.4))) - 0.055;
else r = 12.92 * R;
if (G>0.0031308) g = 1.055 * ( pow(G,(1.0/2.4))) - 0.055;
else g= 12.92 * G;
if (B>0.0031308) b = 1.055 * (pow(B, (1.0/2.4))) - 0.055;
else b = 12.92 * B;
img[3*t] = min(255.0, r * 255.0);
img[3*t+1] = min(255.0, g * 255.0);
img[3*t+2] = min(255.0, b * 255.0);
} |
d708acee40fe8685aa5aab87479de217402875fd.hip | // !!! This is a file automatically generated by hipify!!!
//----------------------------------------
//- Hello CUDA
//- Multi-stream version
//----------------------------------------
//
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#define DATLEN 16
#define NSTREAM 4
int Buf[NSTREAM][DATLEN];
int *Data;
int *GPUMem[NSTREAM];
//----------------------------------------
//- Code executed on the GPU
//- The function name is prefixed with __global__
//- Arguments can be chosen freely by the caller
//----------------------------------------
//
__global__ void GPUadd(int *src, int len, int dat)
{
int threads,thnum;
int start, size;
start = gridDim.x;
threads = gridDim.x * blockDim.x; // total number of threads
size = len / threads; // number of elements handled by each thread
thnum = blockDim.x * blockIdx.x + threadIdx.x; // global index of this thread
start = size * thnum; // first element this thread is responsible for
for (int i = 0; i < size; i++) // over this thread's chunk of the data
src[start++] += dat; // add dat to each array element
return;
}
//----------------------------------------
//- Display array data
//----------------------------------------
//
void DispData(const char *s, int *dat)
{
printf("%s\n",s); //
for (int j=0; j<NSTREAM; j++) {
for (int i=0; i<DATLEN; i++) //
printf("%02d ",*dat++); //
printf("\n"); //
}
printf("\n"); //
}
//----------------------------------------
//- Main
//----------------------------------------
//
int main(int argc, char *argv[])
{
int i;
size_t DataBytes;
hipStream_t Stream[NSTREAM];
printf("[%s]\n",argv[0]);
printf("Welcom to CUDA!\n"); //
for (int i=0; i<NSTREAM; i++)
hipStreamCreate(&Stream[i]);
DataBytes = sizeof(int) * DATLEN; //
Data = Buf[0];
#ifndef SINGLE
hipHostMalloc(&Data, DataBytes*NSTREAM); // GPU
#else
Data =(int*) malloc(DataBytes*NSTREAM);
#endif
for (int j=0; j<NSTREAM; j++) {
for (i=0; i<DATLEN;i++) // initial values count up from 10
*(Data+j*DATLEN+i) = i+10+j; // (any values would do)
}
DispData("GPU IN :-", Data); // show the input for reference
for (int i=0; i<NSTREAM; i++)
hipMalloc((void **)&GPUMem[i], DataBytes); // device memory for each stream's slice
int c_time = clock();
for (int j=0; j<10000; j++) {
for (int i=0; i<NSTREAM; i++) {
#ifndef SINGLE
hipMemcpyAsync(GPUMem[i], Data+i*DATLEN, // copy this stream's slice of Data[] to the device
DataBytes,hipMemcpyHostToDevice,Stream[i]);
#else
hipMemcpy(GPUMem[i], Data+i*DATLEN, // copy this stream's slice of Data[] to the device
DataBytes,hipMemcpyHostToDevice);
#endif
#ifndef SINGLE
hipLaunchKernelGGL(( GPUadd), dim3(2), dim3(4),0, Stream[i], GPUMem[i], DATLEN, 3); // launch in stream i (multi-stream version)
#else
hipLaunchKernelGGL(( GPUadd), dim3(2), dim3(4),0, 0, GPUMem[i], DATLEN, 3); // launch in the default stream (single-stream version)
#endif
#ifndef SINGLE
hipMemcpyAsync(Data+i*DATLEN, GPUMem[i], DataBytes, // copy back the portion handled by Stream[i]
hipMemcpyDeviceToHost, Stream[i]);
#else
hipMemcpy(Data+i*DATLEN, GPUMem[i], DataBytes, // copy the result back into Data[] after the kernel completes
hipMemcpyDeviceToHost);
#endif
}
#ifdef SYNC
for (int i=0; i<NSTREAM; i++)
hipStreamSynchronize(Stream[i]); // synchronize each stream if desired
#endif
}
c_time = clock()-c_time;
printf("Time:-%d\n",c_time);
DispData("GPU OUT:-", Data); //
printf("Congraturations!\n"); //
return 0;
}
| d708acee40fe8685aa5aab87479de217402875fd.cu | //----------------------------------------
//- Hello CUDA
//- Multi-stream version
//----------------------------------------
//
#include <stdio.h>
#include <stdlib.h>
#include <cuda.h>
#include <cuda_runtime.h>
#define DATLEN 16
#define NSTREAM 4
int Buf[NSTREAM][DATLEN];
int *Data;
int *GPUMem[NSTREAM];
//----------------------------------------
//- Code executed on the GPU
//- The function name is prefixed with __global__
//- Arguments can be chosen freely by the caller
//----------------------------------------
//
__global__ void GPUadd(int *src, int len, int dat)
{
int threads,thnum;
int start, size;
start = gridDim.x;
threads = gridDim.x * blockDim.x; // total number of threads
size = len / threads; // number of elements handled by each thread
thnum = blockDim.x * blockIdx.x + threadIdx.x; // global index of this thread
start = size * thnum; // first element this thread is responsible for
for (int i = 0; i < size; i++) // over this thread's chunk of the data
src[start++] += dat; // add dat to each array element
return;
}
//----------------------------------------
//- Display array data
//----------------------------------------
//
void DispData(const char *s, int *dat)
{
printf("%s\n",s); // データのキャプション
for (int j=0; j<NSTREAM; j++) {
for (int i=0; i<DATLEN; i++) // 配列データサイズ分全部表示
printf("%02d ",*dat++); // 表示して
printf("\n"); // 最後に改行しておく
}
printf("\n"); // 最後に改行しておく
}
//----------------------------------------
//- Main
//----------------------------------------
//
int main(int argc, char *argv[])
{
int i;
size_t DataBytes;
cudaStream_t Stream[NSTREAM];
printf("[%s]\n",argv[0]);
printf("Welcom to CUDA!\n"); // ようこそ!
for (int i=0; i<NSTREAM; i++)
cudaStreamCreate(&Stream[i]);
DataBytes = sizeof(int) * DATLEN; // 配列データの総バイト数を計算して
Data = Buf[0];
#ifndef SINGLE
cudaMallocHost(&Data, DataBytes*NSTREAM); // GPUとの共有メモリ領域から転送サイズ分を確保
#else
Data =(int*) malloc(DataBytes*NSTREAM);
#endif
for (int j=0; j<NSTREAM; j++) {
for (i=0; i<DATLEN;i++) // initial values count up from 10
*(Data+j*DATLEN+i) = i+10+j; // (any values would do)
}
DispData("GPU IN :-", Data); // show the input for reference
for (int i=0; i<NSTREAM; i++)
cudaMalloc((void **)&GPUMem[i], DataBytes); // device memory for each stream's slice
int c_time = clock();
for (int j=0; j<10000; j++) {
for (int i=0; i<NSTREAM; i++) {
#ifndef SINGLE
cudaMemcpyAsync(GPUMem[i], Data+i*DATLEN, // copy this stream's slice of Data[] to the device
DataBytes,cudaMemcpyHostToDevice,Stream[i]);
#else
cudaMemcpy(GPUMem[i], Data+i*DATLEN, // copy this stream's slice of Data[] to the device
DataBytes,cudaMemcpyHostToDevice);
#endif
#ifndef SINGLE
GPUadd<<<2, 4,0, Stream[i]>>>(GPUMem[i], DATLEN, 3); // launch in stream i (multi-stream version)
#else
GPUadd<<<2, 4,0>>>(GPUMem[i], DATLEN, 3); // launch in the default stream (single-stream version)
#endif
#ifndef SINGLE
cudaMemcpyAsync(Data+i*DATLEN, GPUMem[i], DataBytes, // copy back the portion handled by Stream[i]
cudaMemcpyDeviceToHost, Stream[i]);
#else
cudaMemcpy(Data+i*DATLEN, GPUMem[i], DataBytes, // copy the result back into Data[] after the kernel completes
cudaMemcpyDeviceToHost);
#endif
}
#ifdef SYNC
for (int i=0; i<NSTREAM; i++)
cudaStreamSynchronize(Stream[i]); // synchronize each stream if desired
#endif
}
c_time = clock()-c_time;
printf("Time:-%d\n",c_time);
DispData("GPU OUT:-", Data); // 中身を表示
printf("Congraturations!\n"); // おめでとうございます!
return 0;
}
|
ee974b14f5e693277e7b111ac7e8c4e06431e216.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cstdio>
#define DLIMIT 99999999
// Cluster Center
//
// float* f; // vector of size #channels
// float x, y, z;
//
#define __min(a, b) (((a) < (b)) ? (a) : (b))
#define __max(a, b) (((a) >= (b)) ? (a) : (b))
/*
* P = point
* S = data shape
* F = data # features
*/
__device__
float at(const float* data, const int4& P, const int3& S) {
long s2d = S.y * S.x, s3d = S.z * S.y * S.x;
return data[P.w * s3d + P.z * s2d + P.y * S.x + P.x];
}
__device__
float gradient(const float* data, int4& P, const int3& S, int nf) {
float d;
float3 diff;
int4 q; q.z = P.z; q.y = P.y; q.x = P.x;
for ( int k = 0; k < nf; k++ ) {
q.w = P.w = k;
q.x = P.x + 1;
d = at(data, P, S) - at(data, q, S);
diff.x += d * d;
q.x = P.x; q.y = P.y + 1;
d = at(data, P, S) - at(data, q, S);
diff.y += d * d;
q.y = P.y; q.z = P.z + 1;
d = at(data, P, S) - at(data, q, S);
diff.z += d * d;
}
return diff.x + diff.y + diff.z;
}
__global__
void init_clusters(const float* data,
float* centers,
const int n_clusters,
const int n_features,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
long lidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( lidx >= n_clusters ) {
return;
}
int3 idx;
int plane = sp_grid.y * sp_grid.x;
idx.z = lidx / plane;
int aux = lidx % plane;
idx.y = aux / sp_grid.x;
idx.x = aux % sp_grid.x;
int3 jdx;
int volume_linear_idx = lidx;
jdx.z = volume_linear_idx / (sp_grid.x * sp_grid.y);
int plane_linear_idx = volume_linear_idx - jdx.z * sp_grid.x * sp_grid.y;
jdx.y = plane_linear_idx / sp_grid.x;
jdx.x = plane_linear_idx % sp_grid.x;
int4 p, q, r;
p.z = r.z = idx.z * sp_shape.z + sp_shape.z / 2;
p.y = r.y = idx.y * sp_shape.y + sp_shape.y / 2;
p.x = r.x = idx.x * sp_shape.x + sp_shape.x / 2;
int shift = n_features + 3;
centers[lidx * shift + n_features + 0] = r.z;
centers[lidx * shift + n_features + 1] = r.y;
centers[lidx * shift + n_features + 2] = r.x;
}
__global__
void expectation(const float* data,
const float* centers,
unsigned int* labels,
const float m, const float S,
const int n_clusters,
const int n_features,
const float3 spacing,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
int4 idx, p, q;
long gidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( gidx >= im_shape.x * im_shape.y * im_shape.z ) {
return;
}
// linear index to 3D pixel index transformation
int plane = im_shape.y * im_shape.x;
int aux = gidx % plane;
idx.z = gidx / plane;
idx.y = aux / im_shape.x;
idx.x = aux % im_shape.x;
// approx center grid positoin
p.z = __max(0, __min(idx.z / sp_shape.z, sp_grid.z - 1));
p.y = __max(0, __min(idx.y / sp_shape.y, sp_grid.y - 1));
p.x = __max(0, __min(idx.x / sp_shape.x, sp_grid.x - 1));
float min_d = DLIMIT, d, dist, adiff, pdiff;
int R = 2, cshift = n_features + 3;
long cidx, ridx = 0;
for ( int k = -R; k <= R; k++ ) {
q.z = p.z + k;
if ( q.z < 0 || q.z >= sp_grid.z ) {continue;}
for ( int i = -R; i <= R; i++ ) {
q.y = p.y + i;
if ( q.y < 0 || q.y >= sp_grid.y ) {continue;}
for ( int j = -R; j <= R; j++ ) {
q.x = p.x + j;
if ( q.x < 0 || q.x >= sp_grid.x ) {continue;}
cidx = q.z * sp_grid.y * sp_grid.x + q.y * sp_grid.x + q.x;
if ( centers[cidx * cshift] == DLIMIT ) {
continue;
}
// Appearance diff
adiff = 0;
for ( int w = 0; w < n_features; w++ ) {
idx.w = w;
d = at(data, idx, im_shape) - centers[cidx * cshift + w];
adiff += d * d;
}
// Position diff
float3 pd;
pd.z = (idx.z - centers[cidx * cshift + n_features + 0]) * spacing.z;
pd.y = (idx.y - centers[cidx * cshift + n_features + 1]) * spacing.y;
pd.x = (idx.x - centers[cidx * cshift + n_features + 2]) * spacing.x;
pdiff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x;
dist = adiff / (m * m * n_features * n_features) + pdiff / (S * S);
// Wrapup
if ( dist < min_d ) {
min_d = dist;
ridx = cidx;
}
}
}
}
labels[gidx] = ridx + 1;
}
__global__
void maximization(const float* data,
const unsigned int* labels,
float* centers,
int n_clusters,
int n_features,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
long lidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( lidx >= n_clusters ) {
return;
}
long cshift = n_features + 3;
int3 cidx;
cidx.z = (int) centers[lidx * cshift + n_features + 0];
cidx.y = (int) centers[lidx * cshift + n_features + 1];
cidx.x = (int) centers[lidx * cshift + n_features + 2];
float ratio = 2.0f;
int3 from;
from.z = __max(cidx.z - sp_shape.z * ratio, 0);
from.y = __max(cidx.y - sp_shape.y * ratio, 0);
from.x = __max(cidx.x - sp_shape.x * ratio, 0);
int3 to;
to.z = __min(cidx.z + sp_shape.z * ratio, im_shape.z);
to.y = __min(cidx.y + sp_shape.y * ratio, im_shape.y);
to.x = __min(cidx.x + sp_shape.x * ratio, im_shape.x);
int4 p;
float* f = new float[cshift];
for ( int k = 0; k < cshift; k++ ) {f[k] = 0;}
long count = 0, offset, s2d = im_shape.x * im_shape.y;
for ( p.z = from.z; p.z < to.z; p.z++ ) {
for ( p.y = from.y; p.y < to.y; p.y++ ) {
for ( p.x = from.x; p.x < to.x; p.x++ ) {
offset = p.z * s2d + p.y * im_shape.x + p.x;
if ( labels[offset] == lidx + 1 ) {
for ( int w = 0; w < n_features; w++ ) {
p.w = w;
f[w] += at(data, p, im_shape);
}
f[n_features + 0] += p.z;
f[n_features + 1] += p.y;
f[n_features + 2] += p.x;
count += 1;
}
}
}
}
if ( count > 0 ) {
for ( int w = 0; w < cshift; w++ ) {
centers[lidx * cshift + w] = f[w] / count;
}
} else {
centers[lidx * cshift] = DLIMIT;
}
delete[] f;
} | ee974b14f5e693277e7b111ac7e8c4e06431e216.cu |
#include <cstdio>
#define DLIMIT 99999999
// Cluster Center
//
// float* f; // vector of size #channels
// float x, y, z;
//
#define __min(a, b) (((a) < (b)) ? (a) : (b))
#define __max(a, b) (((a) >= (b)) ? (a) : (b))
/*
* P = point
* S = data shape
* F = data # features
*/
__device__
float at(const float* data, const int4& P, const int3& S) {
long s2d = S.y * S.x, s3d = S.z * S.y * S.x;
return data[P.w * s3d + P.z * s2d + P.y * S.x + P.x];
}
__device__
float gradient(const float* data, int4& P, const int3& S, int nf) {
float d;
float3 diff;
int4 q; q.z = P.z; q.y = P.y; q.x = P.x;
for ( int k = 0; k < nf; k++ ) {
q.w = P.w = k;
q.x = P.x + 1;
d = at(data, P, S) - at(data, q, S);
diff.x += d * d;
q.x = P.x; q.y = P.y + 1;
d = at(data, P, S) - at(data, q, S);
diff.y += d * d;
q.y = P.y; q.z = P.z + 1;
d = at(data, P, S) - at(data, q, S);
diff.z += d * d;
}
return diff.x + diff.y + diff.z;
}
__global__
void init_clusters(const float* data,
float* centers,
const int n_clusters,
const int n_features,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
long lidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( lidx >= n_clusters ) {
return;
}
int3 idx;
int plane = sp_grid.y * sp_grid.x;
idx.z = lidx / plane;
int aux = lidx % plane;
idx.y = aux / sp_grid.x;
idx.x = aux % sp_grid.x;
int3 jdx;
int volume_linear_idx = lidx;
jdx.z = volume_linear_idx / (sp_grid.x * sp_grid.y);
int plane_linear_idx = volume_linear_idx - jdx.z * sp_grid.x * sp_grid.y;
jdx.y = plane_linear_idx / sp_grid.x;
jdx.x = plane_linear_idx % sp_grid.x;
int4 p, q, r;
p.z = r.z = idx.z * sp_shape.z + sp_shape.z / 2;
p.y = r.y = idx.y * sp_shape.y + sp_shape.y / 2;
p.x = r.x = idx.x * sp_shape.x + sp_shape.x / 2;
int shift = n_features + 3;
centers[lidx * shift + n_features + 0] = r.z;
centers[lidx * shift + n_features + 1] = r.y;
centers[lidx * shift + n_features + 2] = r.x;
}
__global__
void expectation(const float* data,
const float* centers,
unsigned int* labels,
const float m, const float S,
const int n_clusters,
const int n_features,
const float3 spacing,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
int4 idx, p, q;
long gidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( gidx >= im_shape.x * im_shape.y * im_shape.z ) {
return;
}
// linear index to 3D pixel index transformation
int plane = im_shape.y * im_shape.x;
int aux = gidx % plane;
idx.z = gidx / plane;
idx.y = aux / im_shape.x;
idx.x = aux % im_shape.x;
// approx center grid positoin
p.z = __max(0, __min(idx.z / sp_shape.z, sp_grid.z - 1));
p.y = __max(0, __min(idx.y / sp_shape.y, sp_grid.y - 1));
p.x = __max(0, __min(idx.x / sp_shape.x, sp_grid.x - 1));
float min_d = DLIMIT, d, dist, adiff, pdiff;
int R = 2, cshift = n_features + 3;
long cidx, ridx = 0;
for ( int k = -R; k <= R; k++ ) {
q.z = p.z + k;
if ( q.z < 0 || q.z >= sp_grid.z ) {continue;}
for ( int i = -R; i <= R; i++ ) {
q.y = p.y + i;
if ( q.y < 0 || q.y >= sp_grid.y ) {continue;}
for ( int j = -R; j <= R; j++ ) {
q.x = p.x + j;
if ( q.x < 0 || q.x >= sp_grid.x ) {continue;}
cidx = q.z * sp_grid.y * sp_grid.x + q.y * sp_grid.x + q.x;
if ( centers[cidx * cshift] == DLIMIT ) {
continue;
}
// Appearance diff
adiff = 0;
for ( int w = 0; w < n_features; w++ ) {
idx.w = w;
d = at(data, idx, im_shape) - centers[cidx * cshift + w];
adiff += d * d;
}
// Position diff
float3 pd;
pd.z = (idx.z - centers[cidx * cshift + n_features + 0]) * spacing.z;
pd.y = (idx.y - centers[cidx * cshift + n_features + 1]) * spacing.y;
pd.x = (idx.x - centers[cidx * cshift + n_features + 2]) * spacing.x;
pdiff = pd.z * pd.z + pd.y * pd.y + pd.x * pd.x;
dist = adiff / (m * m * n_features * n_features) + pdiff / (S * S);
// Wrapup
if ( dist < min_d ) {
min_d = dist;
ridx = cidx;
}
}
}
}
labels[gidx] = ridx + 1;
}
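// Editor's illustrative launch sketch -- not part of the original file. It
// shows one way the E-step above might be driven from the host: sp_grid is
// derived (by ceiling division, an assumption) from the image and superpixel
// shapes, and one thread is launched per voxel. All buffers are assumed to be
// device pointers already sized by the caller; the M-step (maximization,
// defined below) would be launched the same way with one thread per cluster.
// The function name is hypothetical.
inline void run_expectation_step(const float* data, const float* centers,
                                 unsigned int* labels, float m, float S,
                                 int n_features, float3 spacing,
                                 int3 sp_shape, int3 im_shape)
{
  int3 sp_grid;
  sp_grid.z = (im_shape.z + sp_shape.z - 1) / sp_shape.z;
  sp_grid.y = (im_shape.y + sp_shape.y - 1) / sp_shape.y;
  sp_grid.x = (im_shape.x + sp_shape.x - 1) / sp_shape.x;
  int n_clusters = sp_grid.z * sp_grid.y * sp_grid.x;
  long n_voxels = (long)im_shape.z * im_shape.y * im_shape.x;
  int threads = 256;
  int blocks = (int)((n_voxels + threads - 1) / threads);
  expectation<<<blocks, threads>>>(data, centers, labels, m, S, n_clusters,
                                   n_features, spacing, sp_grid, sp_shape,
                                   im_shape);
}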
__global__
void maximization(const float* data,
const unsigned int* labels,
float* centers,
int n_clusters,
int n_features,
const int3 sp_grid,
const int3 sp_shape,
const int3 im_shape)
{
long lidx = threadIdx.x + (blockIdx.x * blockDim.x);
if ( lidx >= n_clusters ) {
return;
}
long cshift = n_features + 3;
int3 cidx;
cidx.z = (int) centers[lidx * cshift + n_features + 0];
cidx.y = (int) centers[lidx * cshift + n_features + 1];
cidx.x = (int) centers[lidx * cshift + n_features + 2];
float ratio = 2.0f;
int3 from;
from.z = __max(cidx.z - sp_shape.z * ratio, 0);
from.y = __max(cidx.y - sp_shape.y * ratio, 0);
from.x = __max(cidx.x - sp_shape.x * ratio, 0);
int3 to;
to.z = __min(cidx.z + sp_shape.z * ratio, im_shape.z);
to.y = __min(cidx.y + sp_shape.y * ratio, im_shape.y);
to.x = __min(cidx.x + sp_shape.x * ratio, im_shape.x);
int4 p;
float* f = new float[cshift];
for ( int k = 0; k < cshift; k++ ) {f[k] = 0;}
long count = 0, offset, s2d = im_shape.x * im_shape.y;
for ( p.z = from.z; p.z < to.z; p.z++ ) {
for ( p.y = from.y; p.y < to.y; p.y++ ) {
for ( p.x = from.x; p.x < to.x; p.x++ ) {
offset = p.z * s2d + p.y * im_shape.x + p.x;
if ( labels[offset] == lidx + 1 ) {
for ( int w = 0; w < n_features; w++ ) {
p.w = w;
f[w] += at(data, p, im_shape);
}
f[n_features + 0] += p.z;
f[n_features + 1] += p.y;
f[n_features + 2] += p.x;
count += 1;
}
}
}
}
if ( count > 0 ) {
for ( int w = 0; w < cshift; w++ ) {
centers[lidx * cshift + w] = f[w] / count;
}
} else {
centers[lidx * cshift] = DLIMIT;
}
delete[] f;
} |
647ec4c8e6a141c20de00d647d7ccbac2f374267.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_minus_2_back [3][2];
static int dims_update_halo_kernel2_zvel_minus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_back_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = -zvel0(0,0,2);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = -zvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_zvel_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[0][0] * dims_update_halo_kernel2_zvel_minus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[1][0] * dims_update_halo_kernel2_zvel_minus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_minus_2_back[0][0], dims_update_halo_kernel2_zvel_minus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_minus_2_back[1][0], dims_update_halo_kernel2_zvel_minus_2_back[1][1], arg1);
update_halo_kernel2_zvel_minus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,56)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel2_zvel_minus_2_back");
OPS_kernels[56].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_minus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_minus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_minus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_minus_2_back_h[1][1]) {
dims_update_halo_kernel2_zvel_minus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_minus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_minus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_minus_2_back_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_zvel_minus_2_back, dims_update_halo_kernel2_zvel_minus_2_back_h, sizeof(dims_update_halo_kernel2_zvel_minus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_minus_2_back), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[56].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 56;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 56;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel2_zvel_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
| 647ec4c8e6a141c20de00d647d7ccbac2f374267.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_zvel_minus_2_back [3][2];
static int dims_update_halo_kernel2_zvel_minus_2_back_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_zvel_minus_2_back_gpu(ACC<double> &zvel0,
ACC<double> &zvel1,
const int* fields)
{
if(fields[FIELD_ZVEL0] == 1) zvel0(0,0,0) = -zvel0(0,0,2);
if(fields[FIELD_ZVEL1] == 1) zvel1(0,0,0) = -zvel1(0,0,2);
}
__global__ void ops_update_halo_kernel2_zvel_minus_2_back(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[0][0] * dims_update_halo_kernel2_zvel_minus_2_back[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_zvel_minus_2_back[1][0] * dims_update_halo_kernel2_zvel_minus_2_back[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_zvel_minus_2_back[0][0], dims_update_halo_kernel2_zvel_minus_2_back[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_zvel_minus_2_back[1][0], dims_update_halo_kernel2_zvel_minus_2_back[1][1], arg1);
update_halo_kernel2_zvel_minus_2_back_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,56)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel2_zvel_minus_2_back");
OPS_kernels[56].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_zvel_minus_2_back_h[0][0] || ydim0 != dims_update_halo_kernel2_zvel_minus_2_back_h[0][1] || xdim1 != dims_update_halo_kernel2_zvel_minus_2_back_h[1][0] || ydim1 != dims_update_halo_kernel2_zvel_minus_2_back_h[1][1]) {
dims_update_halo_kernel2_zvel_minus_2_back_h[0][0] = xdim0;
dims_update_halo_kernel2_zvel_minus_2_back_h[0][1] = ydim0;
dims_update_halo_kernel2_zvel_minus_2_back_h[1][0] = xdim1;
dims_update_halo_kernel2_zvel_minus_2_back_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_zvel_minus_2_back, dims_update_halo_kernel2_zvel_minus_2_back_h, sizeof(dims_update_halo_kernel2_zvel_minus_2_back)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
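//stage the NUM_FIELDS flags passed via arg2 into the OPS constant buffer and move them to the device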
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_zvel_minus_2_back<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[56].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[56].mpi_time += t2-t1;
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[56].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_zvel_minus_2_back(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 56;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 56;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_zvel_minus_2_back_execute;
if (OPS_diags > 1) {
ops_timing_realloc(56,"update_halo_kernel2_zvel_minus_2_back");
}
ops_enqueue_kernel(desc);
}
#endif
|
c53b32540235fb0ce103ff80ceeca723659a8027.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <fftw3.h>
#include <hipfft.h>
#include <sys/time.h>
#include <assert.h>
using namespace std;
#define k_rangeres 30
#define k_calib 1941.05
#define RESULT_SIZE 2
#define DEBUG
inline
hipError_t checkCuda(hipError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
float *generate_hamming_coef(int m, int n) {
// Calculate normalization power on range cell
float p_range=0;
for(int i=0; i < m; i++) {
p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0);
}
p_range=p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler=0;
for(int j=0; j < n; j++) {
p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0);
}
p_doppler=p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50ohm
const float K_wind = -1/(16383.5*m*n*sqrt(50));
const float c = K_wind/sqrt(p_range*p_doppler);
// Generate elements
float *_hamming_coef= new float[m*n];
for(int i=0; i < m; i++) {
for(int j=0; j < n; j++) {
_hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c;
}
}
return _hamming_coef;
}
float *generate_ma_coef(int n){
float *_ma_coef = new float[n];
float _sum = 0.0;
for(int i=0; i < n; i++) {
_ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2);
_sum += _ma_coef[i];
}
for(int i=0; i < n; i++){
_ma_coef[i] = _ma_coef[i]/_sum;
}
return _ma_coef;
}
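// Multiply each I/Q sample by its precomputed Hamming window coefficient (one thread per sample)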
__global__ void __apply_hamming(cuFloatComplex *a, float *b) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx]));
}
__global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = cuCmulf(inout[i*n+j], macoef[j]);
}
__global__ void __conjugate(cuFloatComplex *a) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx].y *= -1;
}
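// fftshift along the Doppler axis: swap the first and second halves of each row (launched with n/2 threads per row)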
__global__ void __shift(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[i*n+j];
inout[i*n+j] = inout[i*n+(j+n/2)];
inout[i*n+(j+n/2)] = temp;
}
__global__ void __clip(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = n-threadIdx.x-1;
inout[i*n+j] = make_cuFloatComplex(0, 0);
}
__global__ void __abssqr(cuFloatComplex *inout, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf(inout[idx]);
imag = cuCimagf(inout[idx]);
inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0);
}
__global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
out[i*n] = sdata[j];
}
}
__global__ void __sum_inplace(cuFloatComplex *g_idata) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_inplace_v3(cuFloatComplex *in) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
in[i*n] = sdata[j];
}
}
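// Subtract the per-row mean (stored at sum[i*n] by __sum_v3) from each sample and conjugate the result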
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/n;
float avgy = sum[i*n].y/n;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1);
}
__global__ void __scale_real(cuFloatComplex *inout) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0);
}
__global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = blockIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
__global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = threadIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
void tick(timeval *begin) {
gettimeofday(begin, NULL);
}
void tock(timeval *begin, timeval *end, string caption) {
unsigned long long bb, e;
gettimeofday(end, NULL);
bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1;
e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1;
cout << caption << ": " << e-bb << endl;
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio(false);
struct timeval tb, te;
tick(&tb);
cuFloatComplex *iqhh, *iqvv, *iqhv;
float *result;
int sector_id;
const int m = 1024; // cell
const int n = 512; // sweep
const int ma_count = 7;
iqhh = new cuFloatComplex[m*n];
iqvv = new cuFloatComplex[m*n];
iqhv = new cuFloatComplex[m*n];
result = new float[(m/2)*RESULT_SIZE];
float a, b;
// Generate Hamming coefficients
const float *hamming_coef = generate_hamming_coef(m, n);
// Generate MA coefficients
float *ma_coef = generate_ma_coef(ma_count);
fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n);
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE);
for (int j=0; j<ma_count; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j=ma_count; j<n; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute(fft_ma_plan);
fftwf_destroy_plan(fft_ma_plan);
cuFloatComplex *fft_ma;
fft_ma = new cuFloatComplex[n];
for (int j=0; j<n; j++) {
fft_ma[j] = make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]);
}
fftwf_free(_fft_ma);
// Device buffers
/*__constant__*/ float *d_hamming;
/*__constant__*/ cuFloatComplex *d_ma;
cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv;
cuFloatComplex *d_sum;
float *d_result;
//float *d_powhh, *d_powvv;
hipMalloc(&d_hamming, m*n*sizeof(float));
hipMalloc(&d_ma, n*sizeof(cuFloatComplex));
hipMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_sum, m*n*sizeof(cuFloatComplex));
hipMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float));
hipMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
// CUFFT initialization
hipfftHandle fft_range_handle;
hipfftHandle fft_doppler_handle;
hipfftHandle fft_pdop_handle;
int rank = 1; // --- 1D FFTs
int nn[] = { m }; // --- Size of the Fourier transform
int istride = n, ostride = n; // --- Distance between two successive input/output elements
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = n; // --- Number of batched executions
hipfftPlanMany(&fft_range_handle, rank, nn,
inembed, istride, idist,
onembed, ostride, odist, HIPFFT_C2C, batch);
hipfftPlan1d(&fft_doppler_handle, n, HIPFFT_C2C, m);
hipfftPlan1d(&fft_pdop_handle, n, HIPFFT_C2C, m/2);
tock(&tb, &te, "initialization");
float ms; // elapsed time in milliseconds
sector_id = -1;
// create events and streams
hipEvent_t startEvent, stopEvent;
hipEventCreate(&startEvent);
hipEventCreate(&stopEvent);
// hipEventCreate(&dummyEvent);
hipEventRecord(startEvent,0);
tick(&tb);
while(sector_id < 126) {
// tick(&tb);
// Read 1 sector data
// cin >> sector_id;
sector_id++;
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhh[i*n+j] = make_cuFloatComplex(i, j);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqvv[i*n+j] = make_cuFloatComplex(j, i);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhv[i*n+j] = make_cuFloatComplex(i, i);
}
}
hipMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
hipMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
hipMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), hipMemcpyHostToDevice);
// apply Hamming coefficients
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhh, d_hamming);
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqvv, d_hamming);
hipLaunchKernelGGL(( __apply_hamming), dim3(m),dim3(n), 0, 0, d_iqhv, d_hamming);
// FFT range profile
hipfftExecC2C(fft_range_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_range_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_range_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
// FFT+shift Doppler profile
hipLaunchKernelGGL(( __sum_v3), dim3(m),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhh, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhh, d_sum);
hipLaunchKernelGGL(( __sum_v3), dim3(m),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqvv, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqvv, d_sum);
hipLaunchKernelGGL(( __sum_v3), dim3(m),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhv, d_sum);
hipLaunchKernelGGL(( __avgconj), dim3(m),dim3(n), 0, 0, d_iqhv, d_sum);
hipfftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhh);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqvv);
hipLaunchKernelGGL(( __conjugate), dim3(m),dim3(n), 0, 0, d_iqhv);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __shift), dim3(m),dim3(n/2), 0, 0, d_iqhv, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __clip), dim3(m),dim3(2), 0, 0, d_iqhv, n);
// Compute squared magnitude (power) of each sample
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhh, n);
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqvv, n);
hipLaunchKernelGGL(( __abssqr), dim3(m/2),dim3(n), 0, 0, d_iqhv, n);
// FFT PDOP
hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_FORWARD);
hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_FORWARD);
hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_FORWARD);
// Apply MA coefficients
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhh, d_ma);
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqvv, d_ma);
hipLaunchKernelGGL(( __apply_ma), dim3(m/2),dim3(n), 0, 0, d_iqhv, d_ma);
// Inverse FFT
hipfftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, HIPFFT_BACKWARD);
hipfftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, HIPFFT_BACKWARD);
hipfftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, HIPFFT_BACKWARD);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhh);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqvv);
hipLaunchKernelGGL(( __scale_real), dim3(m/2),dim3(n), 0, 0, d_iqhv);
// Sum
hipLaunchKernelGGL(( __sum_inplace_v3), dim3(m/2),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhh);
hipLaunchKernelGGL(( __sum_inplace_v3), dim3(m/2),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqvv);
hipLaunchKernelGGL(( __sum_inplace_v3), dim3(m/2),dim3(n),n*sizeof(cuFloatComplex), 0, d_iqhv);
// hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x;
// float zdb = 10 * log10(z);
// float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x));
// cout << zdb << " " << zdr << endl;
// }
// exit(0);
// Calculate ZdB, Zdr
hipLaunchKernelGGL(( __calcresult_v2), dim3(1),dim3(m/2), 0, 0, d_iqhh, d_iqvv, d_iqhv, d_result, n);
hipMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), hipMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// for (int j=0; j<RESULT_SIZE; j++) {
// cout << result[i*RESULT_SIZE+j] << " ";
// }
// cout << endl;
// }
// exit(0);
}
tock(&tb, &te, "All (us)");
hipEventRecord(stopEvent, 0);
hipEventSynchronize(stopEvent);
hipEventElapsedTime(&ms, startEvent, stopEvent);
printf("Time for sequential transfer and execute (ms): %f\n", ms);
hipEventDestroy(startEvent);
hipEventDestroy(stopEvent);
hipFree(d_hamming);
hipFree(d_ma);
hipFree(d_iqhh);
hipFree(d_iqvv);
hipFree(d_iqhv);
delete[] iqhh;
delete[] iqvv;
delete[] iqhv;
return 0;
}
// hipMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// hipMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), hipMemcpyDeviceToHost);
// for (int i=0; i<m; i++) {
// for (int j=0; j<n; j++) {
// cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") ";
// }
// cout << endl;
// }
// // for (int i=0; i<m; i++) {
// // for (int j=0; j<n; j++) {
// // cout << iqvv[i*n+j].x << " ";
// // }
// // cout << endl;
// // }
// exit(0);
| c53b32540235fb0ce103ff80ceeca723659a8027.cu | #include <iostream>
#include <stdlib.h>
#include <cuda.h>
#include <cuComplex.h>
#include <fftw3.h>
#include <cufft.h>
#include <sys/time.h>
#include <assert.h>
using namespace std;
#define k_rangeres 30
#define k_calib 1941.05
#define RESULT_SIZE 2
#define DEBUG
inline
cudaError_t checkCuda(cudaError_t result) {
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
float *generate_hamming_coef(int m, int n) {
// Calculate normalization power on range cell
float p_range=0;
for(int i=0; i < m; i++) {
p_range=p_range+pow(0.53836-0.46164*cos(2*M_PI*(i)/(m-1)), 2.0);
}
p_range=p_range/m;
// Calculate normalization power on Doppler cell
float p_doppler=0;
for(int j=0; j < n; j++) {
p_doppler=p_doppler+pow(0.53836-0.46164*cos(2*M_PI*(j)/(n-1)), 2.0);
}
p_doppler=p_doppler/n;
// Constant since FFT is not normalized and the power is computed w.r.t. 50ohm
const float K_wind = -1/(16383.5*m*n*sqrt(50));
const float c = K_wind/sqrt(p_range*p_doppler);
// Generate elements
float *_hamming_coef= new float[m*n];
for(int i=0; i < m; i++) {
for(int j=0; j < n; j++) {
_hamming_coef[i*n+j] = (0.53836-0.46164*cos(2*M_PI*(i)/(m-1))) * (0.53836-0.46164*cos(2*M_PI*(j)/(n-1))) * c;
}
}
return _hamming_coef;
}
float *generate_ma_coef(int n){
float *_ma_coef = new float[n];
float _sum = 0.0;
for(int i=0; i < n; i++) {
_ma_coef[i]=exp(-(pow(i-((n-1)/2), 2.0))/2);
_sum += _ma_coef[i];
}
for(int i=0; i < n; i++){
_ma_coef[i] = _ma_coef[i]/_sum;
}
return _ma_coef;
}
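// Multiply each I/Q sample by its precomputed Hamming window coefficient (one thread per sample)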
__global__ void __apply_hamming(cuFloatComplex *a, float *b) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx] = make_cuFloatComplex(b[idx]*cuCrealf(a[idx]), b[idx]*cuCimagf(a[idx]));
}
__global__ void __apply_ma(cuFloatComplex *inout, cuFloatComplex *macoef) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = cuCmulf(inout[i*n+j], macoef[j]);
}
__global__ void __conjugate(cuFloatComplex *a) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
a[idx].y *= -1;
}
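// fftshift along the Doppler axis: swap the first and second halves of each row (launched with n/2 threads per row)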
__global__ void __shift(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = threadIdx.x;
cuFloatComplex temp = inout[i*n+j];
inout[i*n+j] = inout[i*n+(j+n/2)];
inout[i*n+(j+n/2)] = temp;
}
__global__ void __clip(cuFloatComplex *inout, int n) {
const unsigned int i = blockIdx.x, j = n-threadIdx.x-1;
inout[i*n+j] = make_cuFloatComplex(0, 0);
}
__global__ void __abssqr(cuFloatComplex *inout, int n) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
float real, imag;
real = cuCrealf(inout[idx]);
imag = cuCimagf(inout[idx]);
inout[idx] = make_cuFloatComplex(real*real + imag*imag, 0);
}
__global__ void __sum(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
out[i*n+j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
out[i*n+j] = cuCaddf(out[i*n+j], out[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_v3(cuFloatComplex *in, cuFloatComplex *out) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
out[i*n] = sdata[j];
}
}
__global__ void __sum_inplace(cuFloatComplex *g_idata) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
// __syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
// g_idata[i] = make_cuFloatComplex(g_idata[i].x+g_idata[i + s].x, 0);
g_idata[i*n+j] = cuCaddf(g_idata[i*n+j], g_idata[i*n+j+s]);
}
__syncthreads();
}
}
__global__ void __sum_inplace_v3(cuFloatComplex *in) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
extern __shared__ cuFloatComplex sdata[];
sdata[j] = make_cuFloatComplex(in[i*n+j].x, in[i*n+j].y);
__syncthreads();
for (unsigned int s=blockDim.x/2; s>0; s>>=1) {
if (j < s) {
sdata[j] = cuCaddf(sdata[j], sdata[j+s]);
}
__syncthreads();
}
if(j==0) {
in[i*n] = sdata[j];
}
}
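// Subtract the per-row mean (stored at sum[i*n] by __sum_v3) from each sample and conjugate the result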
__global__ void __avgconj(cuFloatComplex *inout, cuFloatComplex *sum) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
float avgx = sum[i*n].x/n;
float avgy = sum[i*n].y/n;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x-avgx, (inout[i*n+j].y-avgy)*-1);
}
__global__ void __scale_real(cuFloatComplex *inout) {
const unsigned int i = blockIdx.x, j = threadIdx.x, n = blockDim.x;
inout[i*n+j] = make_cuFloatComplex(inout[i*n+j].x/n, 0);
}
__global__ void __calcresult(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = blockIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
__global__ void __calcresult_v2(cuFloatComplex *hh, cuFloatComplex *vv, cuFloatComplex *hv, float *out, int n) {
const unsigned int i = threadIdx.x;
float z = pow(i*k_rangeres, 2.0) * k_calib * hh[i*n].x;
float zdb = 10 * log10(z);
float zdr = 10 * (log10(hh[i*n].x)-log10(vv[i*n].x));
out[i*RESULT_SIZE+0] = zdb;
out[i*RESULT_SIZE+1] = zdr;
}
void tick(timeval *begin) {
gettimeofday(begin, NULL);
}
void tock(timeval *begin, timeval *end, string caption) {
unsigned long long bb, e;
gettimeofday(end, NULL);
bb = (unsigned long long)(begin->tv_sec) * 1000000 + (unsigned long long)(begin->tv_usec) / 1;
e = (unsigned long long)(end->tv_sec) * 1000000 + (unsigned long long)(end->tv_usec) / 1;
cout << caption << ": " << e-bb << endl;
}
int main(int argc, char **argv) {
ios_base::sync_with_stdio(false);
struct timeval tb, te;
tick(&tb);
cuFloatComplex *iqhh, *iqvv, *iqhv;
float *result;
int sector_id;
const int m = 1024; // cell
const int n = 512; // sweep
const int ma_count = 7;
iqhh = new cuFloatComplex[m*n];
iqvv = new cuFloatComplex[m*n];
iqhv = new cuFloatComplex[m*n];
result = new float[(m/2)*RESULT_SIZE];
float a, b;
// Generate Hamming coefficients
const float *hamming_coef = generate_hamming_coef(m, n);
// Generate MA coefficients
float *ma_coef = generate_ma_coef(ma_count);
fftwf_complex *_fft_ma = (fftwf_complex*) fftwf_malloc(sizeof(fftwf_complex) * n);
fftwf_plan fft_ma_plan = fftwf_plan_dft_1d(n, _fft_ma, _fft_ma, FFTW_FORWARD, FFTW_ESTIMATE);
for (int j=0; j<ma_count; j++) {
_fft_ma[j][0] = ma_coef[j];
_fft_ma[j][1] = 0;
}
for (int j=ma_count; j<n; j++) {
_fft_ma[j][0] = 0;
_fft_ma[j][1] = 0;
}
fftwf_execute(fft_ma_plan);
fftwf_destroy_plan(fft_ma_plan);
cuFloatComplex *fft_ma;
fft_ma = new cuFloatComplex[n];
for (int j=0; j<n; j++) {
fft_ma[j] = make_cuFloatComplex(_fft_ma[j][0], _fft_ma[j][1]);
}
fftwf_free(_fft_ma);
// Device buffers
/*__constant__*/ float *d_hamming;
/*__constant__*/ cuFloatComplex *d_ma;
cuFloatComplex *d_iqhh, *d_iqvv, *d_iqhv;
cuFloatComplex *d_sum;
float *d_result;
//float *d_powhh, *d_powvv;
cudaMalloc(&d_hamming, m*n*sizeof(float));
cudaMalloc(&d_ma, n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqhh, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqvv, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_iqhv, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_sum, m*n*sizeof(cuFloatComplex));
cudaMalloc(&d_result, (m/2)*RESULT_SIZE*sizeof(float));
cudaMemcpy(d_hamming, hamming_coef, m*n*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_ma, fft_ma, n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
// CUFFT initialization
cufftHandle fft_range_handle;
cufftHandle fft_doppler_handle;
cufftHandle fft_pdop_handle;
int rank = 1; // --- 1D FFTs
int nn[] = { m }; // --- Size of the Fourier transform
int istride = n, ostride = n; // --- Distance between two successive input/output elements
int idist = 1, odist = 1; // --- Distance between batches
int inembed[] = { 0 }; // --- Input size with pitch (ignored for 1D transforms)
int onembed[] = { 0 }; // --- Output size with pitch (ignored for 1D transforms)
int batch = n; // --- Number of batched executions
cufftPlanMany(&fft_range_handle, rank, nn,
inembed, istride, idist,
onembed, ostride, odist, CUFFT_C2C, batch);
cufftPlan1d(&fft_doppler_handle, n, CUFFT_C2C, m);
cufftPlan1d(&fft_pdop_handle, n, CUFFT_C2C, m/2);
tock(&tb, &te, "initialization");
float ms; // elapsed time in milliseconds
sector_id = -1;
// create events and streams
cudaEvent_t startEvent, stopEvent;
cudaEventCreate(&startEvent);
cudaEventCreate(&stopEvent);
// cudaEventCreate(&dummyEvent);
cudaEventRecord(startEvent,0);
tick(&tb);
while(sector_id < 126) {
// tick(&tb);
// Read 1 sector data
// cin >> sector_id;
sector_id++;
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhh[i*n+j] = make_cuFloatComplex(i, j);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqvv[i*n+j] = make_cuFloatComplex(j, i);
}
}
for (int i=0; i<m; i++) {
for (int j=0; j<n; j++) {
// cin >> a >> b;
iqhv[i*n+j] = make_cuFloatComplex(i, i);
}
}
cudaMemcpy(d_iqhh, iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
cudaMemcpy(d_iqvv, iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
cudaMemcpy(d_iqhv, iqhv, m*n*sizeof(cuFloatComplex), cudaMemcpyHostToDevice);
// apply Hamming coefficients
__apply_hamming<<<m,n>>>(d_iqhh, d_hamming);
__apply_hamming<<<m,n>>>(d_iqvv, d_hamming);
__apply_hamming<<<m,n>>>(d_iqhv, d_hamming);
// FFT range profile
cufftExecC2C(fft_range_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_range_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_range_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
// FFT+shift Doppler profile
__sum_v3<<<m,n,n*sizeof(cuFloatComplex)>>>(d_iqhh, d_sum);
__avgconj<<<m,n>>>(d_iqhh, d_sum);
__sum_v3<<<m,n,n*sizeof(cuFloatComplex)>>>(d_iqvv, d_sum);
__avgconj<<<m,n>>>(d_iqvv, d_sum);
__sum_v3<<<m,n,n*sizeof(cuFloatComplex)>>>(d_iqhv, d_sum);
__avgconj<<<m,n>>>(d_iqhv, d_sum);
cufftExecC2C(fft_doppler_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_doppler_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_doppler_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
__conjugate<<<m,n>>>(d_iqhh);
__conjugate<<<m,n>>>(d_iqvv);
__conjugate<<<m,n>>>(d_iqhv);
__shift<<<m,n/2>>>(d_iqhh, n);
__shift<<<m,n/2>>>(d_iqvv, n);
__shift<<<m,n/2>>>(d_iqhv, n);
__clip<<<m,2>>>(d_iqhh, n);
__clip<<<m,2>>>(d_iqvv, n);
__clip<<<m,2>>>(d_iqhv, n);
// Compute squared magnitude (power) of each sample
__abssqr<<<m/2,n>>>(d_iqhh, n);
__abssqr<<<m/2,n>>>(d_iqvv, n);
__abssqr<<<m/2,n>>>(d_iqhv, n);
// FFT PDOP
cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_FORWARD);
cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_FORWARD);
cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_FORWARD);
// Apply MA coefficients
__apply_ma<<<m/2,n>>>(d_iqhh, d_ma);
__apply_ma<<<m/2,n>>>(d_iqvv, d_ma);
__apply_ma<<<m/2,n>>>(d_iqhv, d_ma);
// Inverse FFT
cufftExecC2C(fft_pdop_handle, d_iqhh, d_iqhh, CUFFT_INVERSE);
cufftExecC2C(fft_pdop_handle, d_iqvv, d_iqvv, CUFFT_INVERSE);
cufftExecC2C(fft_pdop_handle, d_iqhv, d_iqhv, CUFFT_INVERSE);
__scale_real<<<m/2,n>>>(d_iqhh);
__scale_real<<<m/2,n>>>(d_iqvv);
__scale_real<<<m/2,n>>>(d_iqhv);
// Sum
__sum_inplace_v3<<<m/2,n,n*sizeof(cuFloatComplex)>>>(d_iqhh);
__sum_inplace_v3<<<m/2,n,n*sizeof(cuFloatComplex)>>>(d_iqvv);
__sum_inplace_v3<<<m/2,n,n*sizeof(cuFloatComplex)>>>(d_iqhv);
// cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// float z = pow(i*k_rangeres, 2.0) * k_calib * iqhh[i*n].x;
// float zdb = 10 * log10(z);
// float zdr = 10 * (log10(iqhh[i*n].x)-log10(iqvv[i*n].x));
// cout << zdb << " " << zdr << endl;
// }
// exit(0);
// Calculate ZdB, Zdr
__calcresult_v2<<<1,m/2>>>(d_iqhh, d_iqvv, d_iqhv, d_result, n);
cudaMemcpy(result, d_result, (m/2)*RESULT_SIZE*sizeof(float), cudaMemcpyDeviceToHost);
// for (int i=0; i<m/2; i++) {
// for (int j=0; j<RESULT_SIZE; j++) {
// cout << result[i*RESULT_SIZE+j] << " ";
// }
// cout << endl;
// }
// exit(0);
}
tock(&tb, &te, "All (us)");
cudaEventRecord(stopEvent, 0);
cudaEventSynchronize(stopEvent);
cudaEventElapsedTime(&ms, startEvent, stopEvent);
printf("Time for sequential transfer and execute (ms): %f\n", ms);
cudaEventDestroy(startEvent);
cudaEventDestroy(stopEvent);
cudaFree(d_hamming);
cudaFree(d_ma);
cudaFree(d_iqhh);
cudaFree(d_iqvv);
cudaFree(d_iqhv);
delete[] iqhh;
delete[] iqvv;
delete[] iqhv;
return 0;
}
// cudaMemcpy(iqhh, d_iqhh, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// cudaMemcpy(iqvv, d_iqvv, m*n*sizeof(cuFloatComplex), cudaMemcpyDeviceToHost);
// for (int i=0; i<m; i++) {
// for (int j=0; j<n; j++) {
// cout << "(" << iqhh[i*n+j].x << "," << iqhh[i*n+j].y << ") ";
// }
// cout << endl;
// }
// // for (int i=0; i<m; i++) {
// // for (int j=0; j<n; j++) {
// // cout << iqvv[i*n+j].x << " ";
// // }
// // cout << endl;
// // }
// exit(0);
|
b7b4d017ed2611a5852d691256fd7db2ff62ed94.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
//
// This kernel is split into a separate compilation unit to cut down
// on compile time
//
namespace faiss {
namespace gpu {
// This is warp divergence central, but this is really a final step
// and happens only a small number of times
inline __device__ int binarySearchForBucket(
int* prefixSumOffsets,
int size,
int val) {
int start = 0;
int end = size;
while (end - start > 0) {
int mid = start + (end - start) / 2;
int midVal = prefixSumOffsets[mid];
// Find the first bucket that we are <=
if (midVal <= val) {
start = mid + 1;
} else {
end = mid;
}
}
// We must find the bucket that it is in
assert(start != size);
return start;
}
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ, bool Dir>
__global__ void pass2SelectLists(
Tensor<float, 2, true> heapDistances,
Tensor<int, 2, true> heapIndices,
void** listIndices,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<Index::idx_t, 2, true> ivfListIds,
int k,
IndicesOptions opt,
Tensor<float, 2, true> outDistances,
Tensor<Index::idx_t, 2, true> outIndices) {
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
constexpr auto kInit = Dir ? kFloatMin : kFloatMax;
BlockSelect<
float,
int,
Dir,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kInit, -1, smemK, smemV, k);
auto queryId = blockIdx.x;
int num = heapDistances.getSize(1);
int limit = utils::roundDown(num, kWarpSize);
int i = threadIdx.x;
auto heapDistanceStart = heapDistances[queryId];
// BlockSelect add cannot be used in a warp divergent circumstance; we
// handle the remainder warp below
for (; i < limit; i += blockDim.x) {
heap.add(heapDistanceStart[i], i);
}
// Handle warp divergence separately
if (i < num) {
heap.addThreadQ(heapDistanceStart[i], i);
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[queryId][i] = smemK[i];
// `v` is the index in `heapIndices`
// We need to translate this into an original user index. The
// reason why we don't maintain intermediate results in terms of
// user indices is to substantially reduce temporary memory
// requirements and global memory write traffic for the list
// scanning.
// This code is highly divergent, but it's probably ok, since this
// is the very last step and it is happening a small number of
// times (#queries x k).
int v = smemV[i];
Index::idx_t index = -1;
if (v != -1) {
// `offset` is the offset of the intermediate result, as
// calculated by the original scan.
int offset = heapIndices[queryId][v];
// In order to determine the actual user index, we need to first
// determine what list it was in.
// We do this by binary search in the prefix sum list.
int probe = binarySearchForBucket(
prefixSumOffsets[queryId].data(),
prefixSumOffsets.getSize(1),
offset);
// This is then the probe for the query; we can find the actual
// list ID from this
Index::idx_t listId = ivfListIds[queryId][probe];
// Now, we need to know the offset within the list
// We ensure that before the array (at offset -1), there is a 0
// value
int listStart = *(prefixSumOffsets[queryId][probe].data() - 1);
int listOffset = offset - listStart;
// This gives us our final index
if (opt == INDICES_32_BIT) {
index = (Index::idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((Index::idx_t*)listIndices[listId])[listOffset];
} else {
index = (listId << 32 | (Index::idx_t)listOffset);
}
}
outIndices[queryId][i] = index;
}
}
void runPass2SelectLists(
Tensor<float, 2, true>& heapDistances,
Tensor<int, 2, true>& heapIndices,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<Index::idx_t, 2, true>& ivfListIds,
int k,
bool chooseLargest,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
hipStream_t stream) {
auto grid = dim3(ivfListIds.getSize(0));
#define RUN_PASS(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR) \
do { \
hipLaunchKernelGGL(( pass2SelectLists<BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR>) \
, dim3(grid), dim3(BLOCK), 0, stream, \
heapDistances, \
heapIndices, \
listIndices.data(), \
prefixSumOffsets, \
ivfListIds, \
k, \
indicesOptions, \
outDistances, \
outIndices); \
CUDA_TEST_ERROR(); \
return; /* success */ \
} while (0)
#if GPU_MAX_SELECTION_K >= 2048
// block size 128 for k <= 1024, 64 for k = 2048
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} else if (k <= 2048) { \
RUN_PASS(64, 2048, 8, DIR); \
} \
} while (0)
#else
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} \
} while (0)
#endif // GPU_MAX_SELECTION_K
if (chooseLargest) {
RUN_PASS_DIR(true);
} else {
RUN_PASS_DIR(false);
}
// unimplemented / too many resources
FAISS_ASSERT_FMT(false, "unimplemented k value (%d)", k);
#undef RUN_PASS_DIR
#undef RUN_PASS
}
} // namespace gpu
} // namespace faiss
| b7b4d017ed2611a5852d691256fd7db2ff62ed94.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/gpu/impl/IVFUtils.cuh>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/Limits.cuh>
#include <faiss/gpu/utils/Select.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
//
// This kernel is split into a separate compilation unit to cut down
// on compile time
//
namespace faiss {
namespace gpu {
// This is warp divergence central, but this is really a final step
// and happens only a small number of times
inline __device__ int binarySearchForBucket(
int* prefixSumOffsets,
int size,
int val) {
int start = 0;
int end = size;
while (end - start > 0) {
int mid = start + (end - start) / 2;
int midVal = prefixSumOffsets[mid];
// Find the first bucket that we are <=
if (midVal <= val) {
start = mid + 1;
} else {
end = mid;
}
}
// We must find the bucket that it is in
assert(start != size);
return start;
}
template <int ThreadsPerBlock, int NumWarpQ, int NumThreadQ, bool Dir>
__global__ void pass2SelectLists(
Tensor<float, 2, true> heapDistances,
Tensor<int, 2, true> heapIndices,
void** listIndices,
Tensor<int, 2, true> prefixSumOffsets,
Tensor<Index::idx_t, 2, true> ivfListIds,
int k,
IndicesOptions opt,
Tensor<float, 2, true> outDistances,
Tensor<Index::idx_t, 2, true> outIndices) {
constexpr int kNumWarps = ThreadsPerBlock / kWarpSize;
__shared__ float smemK[kNumWarps * NumWarpQ];
__shared__ int smemV[kNumWarps * NumWarpQ];
constexpr auto kInit = Dir ? kFloatMin : kFloatMax;
BlockSelect<
float,
int,
Dir,
Comparator<float>,
NumWarpQ,
NumThreadQ,
ThreadsPerBlock>
heap(kInit, -1, smemK, smemV, k);
auto queryId = blockIdx.x;
int num = heapDistances.getSize(1);
int limit = utils::roundDown(num, kWarpSize);
int i = threadIdx.x;
auto heapDistanceStart = heapDistances[queryId];
// BlockSelect add cannot be used in a warp divergent circumstance; we
// handle the remainder warp below
for (; i < limit; i += blockDim.x) {
heap.add(heapDistanceStart[i], i);
}
// Handle warp divergence separately
if (i < num) {
heap.addThreadQ(heapDistanceStart[i], i);
}
// Merge all final results
heap.reduce();
for (int i = threadIdx.x; i < k; i += blockDim.x) {
outDistances[queryId][i] = smemK[i];
// `v` is the index in `heapIndices`
// We need to translate this into an original user index. The
// reason why we don't maintain intermediate results in terms of
// user indices is to substantially reduce temporary memory
// requirements and global memory write traffic for the list
// scanning.
// This code is highly divergent, but it's probably ok, since this
// is the very last step and it is happening a small number of
// times (#queries x k).
int v = smemV[i];
Index::idx_t index = -1;
if (v != -1) {
// `offset` is the offset of the intermediate result, as
// calculated by the original scan.
int offset = heapIndices[queryId][v];
// In order to determine the actual user index, we need to first
// determine what list it was in.
// We do this by binary search in the prefix sum list.
int probe = binarySearchForBucket(
prefixSumOffsets[queryId].data(),
prefixSumOffsets.getSize(1),
offset);
// This is then the probe for the query; we can find the actual
// list ID from this
Index::idx_t listId = ivfListIds[queryId][probe];
// Now, we need to know the offset within the list
// We ensure that before the array (at offset -1), there is a 0
// value
int listStart = *(prefixSumOffsets[queryId][probe].data() - 1);
int listOffset = offset - listStart;
// This gives us our final index
if (opt == INDICES_32_BIT) {
index = (Index::idx_t)((int*)listIndices[listId])[listOffset];
} else if (opt == INDICES_64_BIT) {
index = ((Index::idx_t*)listIndices[listId])[listOffset];
} else {
index = (listId << 32 | (Index::idx_t)listOffset);
}
}
outIndices[queryId][i] = index;
}
}
void runPass2SelectLists(
Tensor<float, 2, true>& heapDistances,
Tensor<int, 2, true>& heapIndices,
DeviceVector<void*>& listIndices,
IndicesOptions indicesOptions,
Tensor<int, 2, true>& prefixSumOffsets,
Tensor<Index::idx_t, 2, true>& ivfListIds,
int k,
bool chooseLargest,
Tensor<float, 2, true>& outDistances,
Tensor<Index::idx_t, 2, true>& outIndices,
cudaStream_t stream) {
auto grid = dim3(ivfListIds.getSize(0));
#define RUN_PASS(BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR) \
do { \
pass2SelectLists<BLOCK, NUM_WARP_Q, NUM_THREAD_Q, DIR> \
<<<grid, BLOCK, 0, stream>>>( \
heapDistances, \
heapIndices, \
listIndices.data(), \
prefixSumOffsets, \
ivfListIds, \
k, \
indicesOptions, \
outDistances, \
outIndices); \
CUDA_TEST_ERROR(); \
return; /* success */ \
} while (0)
#if GPU_MAX_SELECTION_K >= 2048
// block size 128 for k <= 1024, 64 for k = 2048
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} else if (k <= 2048) { \
RUN_PASS(64, 2048, 8, DIR); \
} \
} while (0)
#else
#define RUN_PASS_DIR(DIR) \
do { \
if (k == 1) { \
RUN_PASS(128, 1, 1, DIR); \
} else if (k <= 32) { \
RUN_PASS(128, 32, 2, DIR); \
} else if (k <= 64) { \
RUN_PASS(128, 64, 3, DIR); \
} else if (k <= 128) { \
RUN_PASS(128, 128, 3, DIR); \
} else if (k <= 256) { \
RUN_PASS(128, 256, 4, DIR); \
} else if (k <= 512) { \
RUN_PASS(128, 512, 8, DIR); \
} else if (k <= 1024) { \
RUN_PASS(128, 1024, 8, DIR); \
} \
} while (0)
#endif // GPU_MAX_SELECTION_K
if (chooseLargest) {
RUN_PASS_DIR(true);
} else {
RUN_PASS_DIR(false);
}
// unimplemented / too many resources
FAISS_ASSERT_FMT(false, "unimplemented k value (%d)", k);
#undef RUN_PASS_DIR
#undef RUN_PASS
}
} // namespace gpu
} // namespace faiss
|
9d342b28e76d52698d73f9c2cafbba98ccec4fca.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "flashlight/lib/sequence/criterion/cuda/CriterionUtils.cuh"
#include <algorithm>
namespace {
using fl::lib::seq::CriterionScaleMode;
using namespace fl::lib::seq;
/*
* B thread blocks
* 32 threads/block (ideally)
*/
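// For each batch element, computes the target length as one past the last non-negative entry, clamped to maxSize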
__global__ void
batchTargetSizeKernel(int L, int maxSize, const int* _target, int* targetSize) {
int b = blockIdx.x;
auto target = _target + b * L;
__shared__ int idx;
if (threadIdx.x == 0) {
idx = 0;
}
__syncthreads();
for (int i = L - 1 - threadIdx.x; i >= 0; i -= blockDim.x) {
if (target[i] >= 0) {
atomicMax(&idx, i + 1);
break;
}
}
__syncthreads();
if (threadIdx.x == 0) {
targetSize[b] = idx < maxSize ? idx : maxSize;
}
}
/*
* 1 thread block
* B threads/block (ideally)
*/
template <class Float>
__global__ void computeScaleKernel(
int B,
int T,
int /* N */,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale) {
for (int b = threadIdx.x; b < B; b += blockDim.x) {
switch (scaleMode) {
case CriterionScaleMode::NONE:
scale[b] = 1.0;
break;
case CriterionScaleMode::INPUT_SZ:
scale[b] = T > 0 ? 1.0 / T : 1.0;
break;
case CriterionScaleMode::INPUT_SZ_SQRT:
scale[b] = T > 0 ? std::sqrt(1.0 / T) : 1.0;
break;
case CriterionScaleMode::TARGET_SZ:
scale[b] = targetSize[b] > 0 ? 1.0 / targetSize[b] : 1.0;
break;
case CriterionScaleMode::TARGET_SZ_SQRT:
scale[b] = targetSize[b] > 0 ? std::sqrt(1.0 / targetSize[b]) : 1.0;
break;
default:
break;
}
}
}
} // namespace
namespace fl {
namespace lib {
namespace cuda {
template <class Float>
void CriterionUtils<Float>::batchTargetSize(
int B,
int L,
int maxSize,
const int* target,
int* targetSize,
hipStream_t stream) {
hipLaunchKernelGGL(( batchTargetSizeKernel), dim3(B), dim3(32), 0, stream, L, maxSize, target, targetSize);
}
template <class Float>
void CriterionUtils<Float>::computeScale(
int B,
int T,
int N,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale,
hipStream_t stream) {
int blockSize = ::min(256, (B + 31) / 32 * 32);
hipLaunchKernelGGL(( computeScaleKernel), dim3(1), dim3(blockSize), 0, stream,
B, T, N, scaleMode, targetSize, scale);
}
template struct CriterionUtils<float>;
template struct CriterionUtils<double>;
} // namespace cuda
} // namespace lib
} // namespace fl
| 9d342b28e76d52698d73f9c2cafbba98ccec4fca.cu | /*
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the BSD-style license found in the
* LICENSE file in the root directory of this source tree.
*/
#include "flashlight/lib/sequence/criterion/cuda/CriterionUtils.cuh"
#include <algorithm>
namespace {
using fl::lib::seq::CriterionScaleMode;
using namespace fl::lib::seq;
/*
* B thread blocks
* 32 threads/block (ideally)
*/
__global__ void
batchTargetSizeKernel(int L, int maxSize, const int* _target, int* targetSize) {
int b = blockIdx.x;
auto target = _target + b * L;
__shared__ int idx;
if (threadIdx.x == 0) {
idx = 0;
}
__syncthreads();
for (int i = L - 1 - threadIdx.x; i >= 0; i -= blockDim.x) {
if (target[i] >= 0) {
atomicMax(&idx, i + 1);
break;
}
}
__syncthreads();
if (threadIdx.x == 0) {
targetSize[b] = idx < maxSize ? idx : maxSize;
}
}
/*
* 1 thread block
* B threads/block (ideally)
*/
template <class Float>
__global__ void computeScaleKernel(
int B,
int T,
int /* N */,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale) {
for (int b = threadIdx.x; b < B; b += blockDim.x) {
switch (scaleMode) {
case CriterionScaleMode::NONE:
scale[b] = 1.0;
break;
case CriterionScaleMode::INPUT_SZ:
scale[b] = T > 0 ? 1.0 / T : 1.0;
break;
case CriterionScaleMode::INPUT_SZ_SQRT:
scale[b] = T > 0 ? std::sqrt(1.0 / T) : 1.0;
break;
case CriterionScaleMode::TARGET_SZ:
scale[b] = targetSize[b] > 0 ? 1.0 / targetSize[b] : 1.0;
break;
case CriterionScaleMode::TARGET_SZ_SQRT:
scale[b] = targetSize[b] > 0 ? std::sqrt(1.0 / targetSize[b]) : 1.0;
break;
default:
break;
}
}
}
} // namespace
namespace fl {
namespace lib {
namespace cuda {
template <class Float>
void CriterionUtils<Float>::batchTargetSize(
int B,
int L,
int maxSize,
const int* target,
int* targetSize,
cudaStream_t stream) {
batchTargetSizeKernel<<<B, 32, 0, stream>>>(L, maxSize, target, targetSize);
}
template <class Float>
void CriterionUtils<Float>::computeScale(
int B,
int T,
int N,
CriterionScaleMode scaleMode,
const int* targetSize,
Float* scale,
cudaStream_t stream) {
int blockSize = std::min(256, (B + 31) / 32 * 32);
computeScaleKernel<<<1, blockSize, 0, stream>>>(
B, T, N, scaleMode, targetSize, scale);
}
template struct CriterionUtils<float>;
template struct CriterionUtils<double>;
} // namespace cuda
} // namespace lib
} // namespace fl
|
acc8cc3951f3ef18c2e6d933cf874294c86c0b7f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
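// Per-row dot product: channel_dot[n] = sum_d data_1[n*dim+d] * data_2[n*dim+d] + epsilon;
// with data_1 == data_2 this gives the squared L2 norm of each weight row, used for normalization below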
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot, Dtype epsilon) {
CUDA_KERNEL_LOOP(index, num) {
Dtype dot = 0;
for (int d = 0; d < dim; ++d) {
dot += data_1[index * dim + d] * data_2[index * dim + d];
}
channel_dot[index] = dot + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
const Dtype* norm_data,
Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * dim) {
int n = index / dim;
input_output_data[index] *= norm_data[n];
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if (normalize_ && bottom.size() == 1) {
Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
}
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weight, bottom_data, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans,
transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
(bottom.size() >= 2 && propagate_down[1])) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
if (bottom.size() >= 2) {
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)0., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)0., weight_diff);
}
}
else {
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)1., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)1., weight_diff);
}
}
}
if (bias_term_ && (this->param_propagate_down_[1] ||
(bottom.size() == 3 && propagate_down[2]))) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,
bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
| acc8cc3951f3ef18c2e6d933cf874294c86c0b7f.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layers/inner_product_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
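// kernel_channel_dot: for each of `num` rows, computes the dot product of data_1 and data_2
// over `dim` elements plus epsilon. Below it is called with data_1 == data_2, yielding the
// epsilon-stabilized squared L2 norm of every weight row.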
template <typename Dtype>
__global__ void kernel_channel_dot(const int num, const int dim,
const Dtype* data_1, const Dtype* data_2,
Dtype* channel_dot, Dtype epsilon) {
CUDA_KERNEL_LOOP(index, num) {
Dtype dot = 0;
for (int d = 0; d < dim; ++d) {
dot += data_1[index * dim + d] * data_2[index * dim + d];
}
channel_dot[index] = dot + epsilon;
}
}
template <typename Dtype>
__global__ void kernel_channel_scal(const int num, const int dim,
const Dtype* norm_data,
Dtype* input_output_data) {
CUDA_KERNEL_LOOP(index, num * dim) {
int n = index / dim;
input_output_data[index] *= norm_data[n];
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
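// Optional weight normalization (weights owned by this layer, no weight bottom): compute the
// squared L2 norm of each of the N_ weight rows, raise it to -0.5, and scale the rows in place
// so they have (approximately) unit norm before the GEMV/GEMM below.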
if (normalize_ && bottom.size() == 1) {
Dtype* mutable_weight = this->blobs_[0]->mutable_gpu_data();
Dtype* weight_norm_data = weight_norm_.mutable_gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_dot<Dtype> << <CAFFE_GET_BLOCKS(N_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight, weight, weight_norm_data, 1e-12);
caffe_gpu_powx(N_, weight_norm_data, Dtype(-0.5), weight_norm_data);
// NOLINT_NEXT_LINE(whitespace/operators)
kernel_channel_scal<Dtype> << <CAFFE_GET_BLOCKS(N_ * K_),
CAFFE_CUDA_NUM_THREADS >> >(N_, K_, weight_norm_data, mutable_weight);
}
if (M_ == 1) {
caffe_gpu_gemv<Dtype>(CblasNoTrans, N_, K_, (Dtype)1.,
weight, bottom_data, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_axpy<Dtype>(N_, bias_multiplier_.cpu_data()[0],
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), top_data);
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans,
transpose_ ? CblasNoTrans : CblasTrans,
M_, N_, K_, (Dtype)1.,
bottom_data, weight, (Dtype)0., top_data);
if (bias_term_)
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans, M_, N_, 1, (Dtype)1.,
bias_multiplier_.gpu_data(),
bottom.size() == 3 ? bottom[2]->gpu_data() : this->blobs_[1]->gpu_data(), (Dtype)1., top_data);
}
}
template <typename Dtype>
void InnerProductLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
const Dtype* weight = bottom.size() >= 2 ? bottom[1]->gpu_data() : this->blobs_[0]->gpu_data();
if ((bottom.size() == 1 && this->param_propagate_down_[0]) ||
(bottom.size() >= 2 && propagate_down[1])) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* weight_diff = bottom.size() >= 2 ? bottom[1]->mutable_gpu_diff() : this->blobs_[0]->mutable_gpu_diff();
if (bottom.size() >= 2) {
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)0., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)0., weight_diff);
}
}
else {
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
K_, N_, M_,
(Dtype)1., bottom_data, top_diff,
(Dtype)1., weight_diff);
}
else {
caffe_gpu_gemm<Dtype>(CblasTrans, CblasNoTrans,
N_, K_, M_,
(Dtype)1., top_diff, bottom_data,
(Dtype)1., weight_diff);
}
}
}
if (bias_term_ && (this->param_propagate_down_[1] ||
(bottom.size() == 3 && propagate_down[2]))) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bias
caffe_gpu_gemv<Dtype>(CblasTrans, M_, N_, (Dtype)1., top_diff,
bias_multiplier_.gpu_data(), (Dtype)1.,
bottom.size() == 3 ? bottom[2]->mutable_gpu_diff() : this->blobs_[1]->mutable_gpu_diff());
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
// Gradient with respect to bottom data
if (transpose_) {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
} else {
caffe_gpu_gemm<Dtype>(CblasNoTrans, CblasNoTrans,
M_, K_, N_,
(Dtype)1., top_diff, weight,
(Dtype)0., bottom[0]->mutable_gpu_diff());
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(InnerProductLayer);
} // namespace caffe
|
ec9e6fd2e29eadf61f9939175d35baeb8edd913e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <math_constants.h> // in CUDA SDK, for CUDART_NAN_F
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <algorithm>
namespace faiss {
namespace gpu {
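// calcResidual kernel: one thread block per input vector, residual = vec - assigned centroid
// (NaN-filled if the vector was unassigned, centroid id == -1). The LargeDim variant strides
// each thread across the dimension when it exceeds the launch block size.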
template <typename IndexT, typename CentroidT, bool LargeDim>
__global__ void calcResidual(
Tensor<float, 2, true> vecs,
Tensor<CentroidT, 2, true> centroids,
Tensor<IndexT, 1, true> vecToCentroid,
Tensor<float, 2, true> residuals) {
auto vec = vecs[blockIdx.x];
auto residual = residuals[blockIdx.x];
IndexT centroidId = vecToCentroid[blockIdx.x];
// Vector could be invalid (containing NaNs), so -1 was the
// classified centroid
if (centroidId == -1) {
if (LargeDim) {
for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
residual[i] = CUDART_NAN_F;
}
} else {
residual[threadIdx.x] = CUDART_NAN_F;
}
return;
}
auto centroid = centroids[centroidId];
if (LargeDim) {
for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
residual[i] = vec[i] - ConvertTo<float>::to(centroid[i]);
}
} else {
residual[threadIdx.x] =
vec[threadIdx.x] - ConvertTo<float>::to(centroid[threadIdx.x]);
}
}
template <typename IndexT, typename CentroidT>
void calcResidual(
Tensor<float, 2, true>& vecs,
Tensor<CentroidT, 2, true>& centroids,
Tensor<IndexT, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
hipStream_t stream) {
FAISS_ASSERT(vecs.getSize(1) == centroids.getSize(1));
FAISS_ASSERT(vecs.getSize(1) == residuals.getSize(1));
FAISS_ASSERT(vecs.getSize(0) == vecToCentroid.getSize(0));
FAISS_ASSERT(vecs.getSize(0) == residuals.getSize(0));
dim3 grid(vecs.getSize(0));
int maxThreads = getMaxThreadsCurrentDevice();
bool largeDim = vecs.getSize(1) > maxThreads;
dim3 block(::min(vecs.getSize(1), maxThreads));
if (largeDim) {
hipLaunchKernelGGL(( calcResidual<IndexT, CentroidT, true>), dim3(grid), dim3(block), 0, stream,
vecs, centroids, vecToCentroid, residuals);
} else {
hipLaunchKernelGGL(( calcResidual<IndexT, CentroidT, false>), dim3(grid), dim3(block), 0, stream,
vecs, centroids, vecToCentroid, residuals);
}
CUDA_TEST_ERROR();
}
void runCalcResidual(
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& centroids,
Tensor<Index::idx_t, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
hipStream_t stream) {
calcResidual<Index::idx_t, float>(
vecs, centroids, vecToCentroid, residuals, stream);
}
void runCalcResidual(
Tensor<float, 2, true>& vecs,
Tensor<half, 2, true>& centroids,
Tensor<Index::idx_t, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
hipStream_t stream) {
calcResidual<Index::idx_t, half>(
vecs, centroids, vecToCentroid, residuals, stream);
}
template <typename IndexT, typename T>
__global__ void gatherReconstructByIds(
Tensor<IndexT, 1, true> ids,
Tensor<T, 2, true> vecs,
Tensor<float, 2, true> out) {
IndexT id = ids[blockIdx.x];
// FIXME: will update all GPU code shortly to use int64 indexing types, but
// this is a minimal change to allow for >= 2^31 elements in a matrix
// auto vec = vecs[id];
// auto outVec = out[blockIdx.x];
auto vec = vecs.data() + id * vecs.getSize(1);
auto outVec = out.data() + blockIdx.x * out.getSize(1);
Convert<T, float> conv;
for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]);
}
}
template <typename IndexT, typename T>
__global__ void gatherReconstructByRange(
IndexT start,
IndexT num,
Tensor<T, 2, true> vecs,
Tensor<float, 2, true> out) {
IndexT id = start + blockIdx.x;
// FIXME: will update all GPU code shortly to use int64 indexing types, but
// this is a minimal change to allow for >= 2^31 elements in a matrix
// auto vec = vecs[id];
// auto outVec = out[blockIdx.x];
auto vec = vecs.data() + id * vecs.getSize(1);
auto outVec = out.data() + blockIdx.x * out.getSize(1);
Convert<T, float> conv;
for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]);
}
}
template <typename IndexT, typename T>
void gatherReconstructByIds(
Tensor<IndexT, 1, true>& ids,
Tensor<T, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
FAISS_ASSERT(ids.getSize(0) == out.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == out.getSize(1));
dim3 grid(ids.getSize(0));
int maxThreads = getMaxThreadsCurrentDevice();
dim3 block(::min(vecs.getSize(1), maxThreads));
hipLaunchKernelGGL(( gatherReconstructByIds<IndexT, T>)
, dim3(grid), dim3(block), 0, stream, ids, vecs, out);
CUDA_TEST_ERROR();
}
template <typename IndexT, typename T>
void gatherReconstructByRange(
IndexT start,
IndexT num,
Tensor<T, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
FAISS_ASSERT(num > 0);
FAISS_ASSERT(num == out.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == out.getSize(1));
FAISS_ASSERT(start + num <= vecs.getSize(0));
dim3 grid(num);
int maxThreads = getMaxThreadsCurrentDevice();
dim3 block(::min(vecs.getSize(1), maxThreads));
hipLaunchKernelGGL(( gatherReconstructByRange<IndexT, T>)
, dim3(grid), dim3(block), 0, stream, start, num, vecs, out);
CUDA_TEST_ERROR();
}
void runReconstruct(
Tensor<Index::idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
gatherReconstructByIds<Index::idx_t, float>(ids, vecs, out, stream);
}
void runReconstruct(
Tensor<Index::idx_t, 1, true>& ids,
Tensor<half, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
gatherReconstructByIds<Index::idx_t, half>(ids, vecs, out, stream);
}
void runReconstruct(
Index::idx_t start,
Index::idx_t num,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
gatherReconstructByRange<Index::idx_t, float>(
start, num, vecs, out, stream);
}
void runReconstruct(
Index::idx_t start,
Index::idx_t num,
Tensor<half, 2, true>& vecs,
Tensor<float, 2, true>& out,
hipStream_t stream) {
gatherReconstructByRange<Index::idx_t, half>(start, num, vecs, out, stream);
}
} // namespace gpu
} // namespace faiss
| ec9e6fd2e29eadf61f9939175d35baeb8edd913e.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/StaticUtils.h>
#include <faiss/impl/FaissAssert.h>
#include <math_constants.h> // in CUDA SDK, for CUDART_NAN_F
#include <faiss/gpu/impl/VectorResidual.cuh>
#include <faiss/gpu/utils/ConversionOperators.cuh>
#include <faiss/gpu/utils/Tensor.cuh>
#include <algorithm>
namespace faiss {
namespace gpu {
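// calcResidual kernel: one thread block per input vector, residual = vec - assigned centroid
// (NaN-filled if the vector was unassigned, centroid id == -1). The LargeDim variant strides
// each thread across the dimension when it exceeds the launch block size.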
template <typename IndexT, typename CentroidT, bool LargeDim>
__global__ void calcResidual(
Tensor<float, 2, true> vecs,
Tensor<CentroidT, 2, true> centroids,
Tensor<IndexT, 1, true> vecToCentroid,
Tensor<float, 2, true> residuals) {
auto vec = vecs[blockIdx.x];
auto residual = residuals[blockIdx.x];
IndexT centroidId = vecToCentroid[blockIdx.x];
// Vector could be invalid (containing NaNs), so -1 was the
// classified centroid
if (centroidId == -1) {
if (LargeDim) {
for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
residual[i] = CUDART_NAN_F;
}
} else {
residual[threadIdx.x] = CUDART_NAN_F;
}
return;
}
auto centroid = centroids[centroidId];
if (LargeDim) {
for (int i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
residual[i] = vec[i] - ConvertTo<float>::to(centroid[i]);
}
} else {
residual[threadIdx.x] =
vec[threadIdx.x] - ConvertTo<float>::to(centroid[threadIdx.x]);
}
}
template <typename IndexT, typename CentroidT>
void calcResidual(
Tensor<float, 2, true>& vecs,
Tensor<CentroidT, 2, true>& centroids,
Tensor<IndexT, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
cudaStream_t stream) {
FAISS_ASSERT(vecs.getSize(1) == centroids.getSize(1));
FAISS_ASSERT(vecs.getSize(1) == residuals.getSize(1));
FAISS_ASSERT(vecs.getSize(0) == vecToCentroid.getSize(0));
FAISS_ASSERT(vecs.getSize(0) == residuals.getSize(0));
dim3 grid(vecs.getSize(0));
int maxThreads = getMaxThreadsCurrentDevice();
bool largeDim = vecs.getSize(1) > maxThreads;
dim3 block(std::min(vecs.getSize(1), maxThreads));
if (largeDim) {
calcResidual<IndexT, CentroidT, true><<<grid, block, 0, stream>>>(
vecs, centroids, vecToCentroid, residuals);
} else {
calcResidual<IndexT, CentroidT, false><<<grid, block, 0, stream>>>(
vecs, centroids, vecToCentroid, residuals);
}
CUDA_TEST_ERROR();
}
void runCalcResidual(
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& centroids,
Tensor<Index::idx_t, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
cudaStream_t stream) {
calcResidual<Index::idx_t, float>(
vecs, centroids, vecToCentroid, residuals, stream);
}
void runCalcResidual(
Tensor<float, 2, true>& vecs,
Tensor<half, 2, true>& centroids,
Tensor<Index::idx_t, 1, true>& vecToCentroid,
Tensor<float, 2, true>& residuals,
cudaStream_t stream) {
calcResidual<Index::idx_t, half>(
vecs, centroids, vecToCentroid, residuals, stream);
}
template <typename IndexT, typename T>
__global__ void gatherReconstructByIds(
Tensor<IndexT, 1, true> ids,
Tensor<T, 2, true> vecs,
Tensor<float, 2, true> out) {
IndexT id = ids[blockIdx.x];
// FIXME: will update all GPU code shortly to use int64 indexing types, but
// this is a minimal change to allow for >= 2^31 elements in a matrix
// auto vec = vecs[id];
// auto outVec = out[blockIdx.x];
auto vec = vecs.data() + id * vecs.getSize(1);
auto outVec = out.data() + blockIdx.x * out.getSize(1);
Convert<T, float> conv;
for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]);
}
}
template <typename IndexT, typename T>
__global__ void gatherReconstructByRange(
IndexT start,
IndexT num,
Tensor<T, 2, true> vecs,
Tensor<float, 2, true> out) {
IndexT id = start + blockIdx.x;
// FIXME: will update all GPU code shortly to use int64 indexing types, but
// this is a minimal change to allow for >= 2^31 elements in a matrix
// auto vec = vecs[id];
// auto outVec = out[blockIdx.x];
auto vec = vecs.data() + id * vecs.getSize(1);
auto outVec = out.data() + blockIdx.x * out.getSize(1);
Convert<T, float> conv;
for (IndexT i = threadIdx.x; i < vecs.getSize(1); i += blockDim.x) {
outVec[i] = id == IndexT(-1) ? 0.0f : conv(vec[i]);
}
}
template <typename IndexT, typename T>
void gatherReconstructByIds(
Tensor<IndexT, 1, true>& ids,
Tensor<T, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
FAISS_ASSERT(ids.getSize(0) == out.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == out.getSize(1));
dim3 grid(ids.getSize(0));
int maxThreads = getMaxThreadsCurrentDevice();
dim3 block(std::min(vecs.getSize(1), maxThreads));
gatherReconstructByIds<IndexT, T>
<<<grid, block, 0, stream>>>(ids, vecs, out);
CUDA_TEST_ERROR();
}
template <typename IndexT, typename T>
void gatherReconstructByRange(
IndexT start,
IndexT num,
Tensor<T, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
FAISS_ASSERT(num > 0);
FAISS_ASSERT(num == out.getSize(0));
FAISS_ASSERT(vecs.getSize(1) == out.getSize(1));
FAISS_ASSERT(start + num <= vecs.getSize(0));
dim3 grid(num);
int maxThreads = getMaxThreadsCurrentDevice();
dim3 block(std::min(vecs.getSize(1), maxThreads));
gatherReconstructByRange<IndexT, T>
<<<grid, block, 0, stream>>>(start, num, vecs, out);
CUDA_TEST_ERROR();
}
void runReconstruct(
Tensor<Index::idx_t, 1, true>& ids,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
gatherReconstructByIds<Index::idx_t, float>(ids, vecs, out, stream);
}
void runReconstruct(
Tensor<Index::idx_t, 1, true>& ids,
Tensor<half, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
gatherReconstructByIds<Index::idx_t, half>(ids, vecs, out, stream);
}
void runReconstruct(
Index::idx_t start,
Index::idx_t num,
Tensor<float, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
gatherReconstructByRange<Index::idx_t, float>(
start, num, vecs, out, stream);
}
void runReconstruct(
Index::idx_t start,
Index::idx_t num,
Tensor<half, 2, true>& vecs,
Tensor<float, 2, true>& out,
cudaStream_t stream) {
gatherReconstructByRange<Index::idx_t, half>(start, num, vecs, out, stream);
}
} // namespace gpu
} // namespace faiss
|
eb1118da3045ecc3054c11b81808e792c688a4a2.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include<limits.h>
#include <hip/hip_runtime.h>
#define maxWIDTH 300
#define BOUND 200
#include<time.h>
#include<quicksort.h>
__constant__ Point2D sortedX[maxWIDTH];
__constant__ Point2D sortedY[maxWIDTH];
__host__ void getRandomInputPoints(int n,Point2D * ptr)
{
srand(0);
int i=0;
int pointx=0,pointy=0;
for(i=0;i<n;i++)
{
pointx=rand()%BOUND;
ptr->x=pointx;
pointy=rand()%BOUND;
ptr->y=pointy;
ptr++;
}
}
__host__ void printArray(Point2D*ptr,int n)
{
int i=0;
for(i=0;i<n;i++)
{
printf("Point %d - X: %d , Y: %d\n",i+1,ptr[i].x,ptr[i].y);
}
}
__device__ int minAreaFunction(int *ptr)
{
int minArea=INT_MAX;
for(int i=0;i<(maxWIDTH*maxWIDTH);i++)
{
if(ptr[i]<minArea && ptr[i]!=0)
{
minArea=ptr[i];
}
}
return minArea;
}
__device__ void sortedArea(int *area,int *currentPoint)
{
int totalpoint=*currentPoint;
int i,j,temp;
for(i=0;i<totalpoint-1;i++)
{
for(j=0;j<totalpoint-i-1;j++)
{
if(area[j]>area[j+1])
{
temp=area[j+1];
area[j+1]=area[j];
area[j]=temp;
}
}
}
}
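// getArea: for each of the *currentPoint candidate points, computes
// side = max(x + 1 - leftEdge, y + 1 - bottomEdge) and stores side*side, i.e. the area of the
// square spanned from the candidate left/bottom edges out to that point. Caller frees the array.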
__device__ int * getArea(Point2D *ptr,int len,int leftEdge,int bottomEdge,int *currentPoint)
{
int totalpoint=*currentPoint;
int *area=(int*)malloc(totalpoint * sizeof(int));
int i=0;
int ar=0;
for(i=0;i<totalpoint;i++)
{
if((ptr[i].x+1-leftEdge) > (ptr[i].y+1-bottomEdge))
{
ar=(int)(ptr[i].x+1-leftEdge)*((ptr[i].x+1-leftEdge));
}
else
{
ar=(int)(ptr[i].y+1-bottomEdge)*((ptr[i].y+1-bottomEdge));
}
area[i]=ar;
}
return area;
}
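// getAboveRightPoints: returns the points present in both sortedX[startx..len) and
// sortedY[starty..len), in effect the points on or to the upper-right of the candidate corner,
// with duplicates removed; the count is written back through currentPoint.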
__device__ Point2D* getAboveRightPoints(int startx,int starty,Point2D* ptrx,Point2D* ptry,int len,int *currentPoint)
{
int k=0;
int i,j;
int count=0;
int totalpoint=*currentPoint;
Point2D* temp;
int check=0;
for(i=startx;i<len;i++)
{
for(j=starty;j<len;j++)
{
if(ptrx[i].x==ptry[j].x && ptrx[i].y==ptry[j].y)
{
count++;
break;
}
}
}
totalpoint=count;
temp=(Point2D*)malloc(totalpoint*sizeof(Point2D));
for(j=starty;j<len;j++)
{
for(i=startx;i<len;i++)
{
if(ptry[j].x==ptrx[i].x && ptry[j].y==ptrx[i].y)
{
check=0;
for(int m=0;m<k;m++)
if(ptry[j].x==temp[m].x && ptry[j].y==temp[m].y)
check=1;
if(check==0)
{
temp[k].x=ptry[j].x;
temp[k].y=ptry[j].y;
k++;
break;
}
}
}
}
*currentPoint=k;
return temp;
}
__device__ void sortingbyAxis(Point2D *points,Point2D *sorted,int n,int x)
{
int i,j;
Point2D temp;
for(i=0;i<n;i++)
{
sorted[i].x=points[i].x;
sorted[i].y=points[i].y;
}
//sort by x co-ordinate
if(x==1)
{
for(i=0;i<n-1;i++)
{
for(j=0;j<n-i-1;j++)
{
if(sorted[j].x>sorted[j+1].x)
{
temp.x=sorted[j].x;
temp.y=sorted[j].y;
sorted[j].x=sorted[j+1].x;
sorted[j].y=sorted[j+1].y;
sorted[j+1].x=temp.x;
sorted[j+1].y=temp.y;
}
}
}
}
//sort by y co-ordinate
else
{
for(i=0;i<n-1;i++)
{
for(j=0;j<n-i-1;j++)
{
if(sorted[j].y>sorted[j+1].y)
{
temp.x=sorted[j].x;
temp.y=sorted[j].y;
sorted[j].x=sorted[j+1].x;
sorted[j].y=sorted[j+1].y;
sorted[j+1].x=temp.x;
sorted[j+1].y=temp.y;
}
}
}
}
}
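// k_bounding_algorithm: one thread per (i, j) pair, where sortedX[i] gives the candidate left
// edge and sortedY[j] the candidate bottom edge. If at least k points remain above/right of
// that corner, the k-th smallest enclosing-square area is folded into *finalArea via atomicMin.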
__global__ void k_bounding_algorithm(Point2D * points,int n,int k,int *finalArea)
{
int threadId = blockDim.x*blockIdx.x + threadIdx.x;
if(threadId < ((n-k+1)*(n-k+1)))
{
int i=threadId / (n-k+1);
int j=threadId %(n-k+1);
Point2D bottomPoint;
Point2D leftPoint;
Point2D *Rpoints=NULL;
int *area;
int totalpoints=0;
*finalArea=INT_MAX;
__syncthreads();
leftPoint=sortedX[i];
bottomPoint=sortedY[j];
if(leftPoint.x <= bottomPoint.x && leftPoint.y >= bottomPoint.y)
{
int leftEdge=(int)(leftPoint.x-1);
int bottomEdge=(int)(bottomPoint.y-1);
Rpoints=getAboveRightPoints(i,j,sortedX,sortedY,n,&totalpoints);
if(totalpoints>=k)
{
area=getArea(Rpoints,n,leftEdge,bottomEdge,&totalpoints);
free(Rpoints);
sortedArea(area,&totalpoints);
atomicMin(finalArea,area[k-1]);
free(area);
}
}
}
}
int main(void)
{
hipError_t err = hipSuccess;
//hipEvent_t seq_start,seq_stop;
int n=0;
int k=0;
printf("\nEnter Number of Points in plane(n = 2-%d):",maxWIDTH);
fflush(stdin);
scanf("%d",&n);
if(n<=0 || n > maxWIDTH)
{
printf("\nmaxWIDTH value is %d,Please re run and enter n value from %d to %d",maxWIDTH,2,maxWIDTH);
exit(0);
}
printf("\nEnter Number of Points inside square(k) in range from %d to %d:\n",1,n);
fflush(stdin);
scanf("%d",&k);
if(k<=0 || k>n)
{
printf("\n n value is %d,Please re run and enter k value from %d to %d\n",n,2,n);
exit(0);
}
int *h_minArea=NULL;
// hipEventCreate(&seq_start);
// hipEventCreate(&seq_stop);
Point2D *h_points = (Point2D *)malloc(n*sizeof(Point2D));
if (h_points == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
Point2D *h_sortedX = (Point2D *)malloc(maxWIDTH*sizeof(Point2D));
if (h_sortedX == NULL)
{
fprintf(stderr, "Failed to allocate sorted X host vectors!\n");
exit(EXIT_FAILURE);
}
Point2D *h_sortedY = (Point2D *)malloc(maxWIDTH*sizeof(Point2D));
if (h_sortedY == NULL)
{
fprintf(stderr, "Failed to allocate sorted Y host vectors!\n");
exit(EXIT_FAILURE);
}
getRandomInputPoints(n,h_points);
printf("\nPoints:\n");
printArray(h_points,n);
h_minArea = (int*)malloc(sizeof(int));
Point2D *d_points = NULL;
err = hipMalloc((void**)&d_points,n*sizeof(Point2D));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_points,h_points,n*sizeof(Point2D),hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_minArea = NULL;
err = hipMalloc((void**)&d_minArea,sizeof(int));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_minArea,h_minArea,sizeof(int),hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Point2D *d_sortedX = NULL;
err = hipMalloc((void**)&d_sortedX,n*sizeof(Point2D));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device sortedX (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
Point2D *d_sortedY = NULL;
err = hipMalloc((void**)&d_sortedY,n*sizeof(Point2D));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to allocate device sortedY (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_sortedX,h_points,n*sizeof(Point2D),hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(d_sortedY,h_points,n*sizeof(Point2D),hipMemcpyHostToDevice);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// hipEventRecord(seq_start);
hipLaunchKernelGGL(( qsort), dim3(1),dim3(1),0, 0, d_sortedX,0,n-1,1);
hipLaunchKernelGGL(( qsort), dim3(1),dim3(1),0, 0, d_sortedY,0,n-1,0);
err = hipMemcpy(h_sortedX,d_sortedX,n*sizeof(Point2D),hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_sortedY,d_sortedY,n*sizeof(Point2D),hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpyToSymbol(sortedX,h_sortedX,n*sizeof(Point2D));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy sortedX from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpyToSymbol(sortedY,h_sortedY,n*sizeof(Point2D));
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy sortedY from host to device (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipDeviceSynchronize();
int b=(n-k+1);
int blocksPerGrid = ((b*b)/1024)+1;
dim3 threadsPerBlock (1024,1,1);
printf("CUDA kernel launch with %d blocks of 1024 threads\n", blocksPerGrid);
// hipEventRecord(seq_start);
hipLaunchKernelGGL(( k_bounding_algorithm), dim3(blocksPerGrid),dim3(threadsPerBlock), 0, 0, d_points,n,k,d_minArea);
// hipEventRecord(seq_stop);
// hipEventSynchronize(seq_stop);
//float seq_milliseconds = 0;
//err = hipEventElapsedTime(&seq_milliseconds, seq_start, seq_stop);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to calculate elapse time (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("The elapsed time is %.2f ms\n", seq_milliseconds);
err = hipGetLastError();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipMemcpy(h_minArea,d_minArea,sizeof(int),hipMemcpyDeviceToHost);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\nMinimum Area of Square containing %d points out of %d is : %d\n",k,n,*h_minArea);
err = hipFree(d_minArea);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_minArea (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
err = hipFree(d_points);
if (err != hipSuccess)
{
fprintf(stderr, "Failed to free d_points (error code %s)!\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_points);
err = hipDeviceReset();
if (err != hipSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", hipGetErrorString(err));
exit(EXIT_FAILURE);
}
hipFree(d_sortedX);
hipFree(d_sortedY);
return 0;
}
| eb1118da3045ecc3054c11b81808e792c688a4a2.cu | #include <stdio.h>
#include<limits.h>
#include <cuda_runtime.h>
#define maxWIDTH 300
#define BOUND 200
#include<time.h>
#include<quicksort.h>
__constant__ Point2D sortedX[maxWIDTH];
__constant__ Point2D sortedY[maxWIDTH];
__host__ void getRandomInputPoints(int n,Point2D * ptr)
{
srand(0);
int i=0;
int pointx=0,pointy=0;
for(i=0;i<n;i++)
{
pointx=rand()%BOUND;
ptr->x=pointx;
pointy=rand()%BOUND;
ptr->y=pointy;
ptr++;
}
}
__host__ void printArray(Point2D*ptr,int n)
{
int i=0;
for(i=0;i<n;i++)
{
printf("Point %d - X: %d , Y: %d\n",i+1,ptr[i].x,ptr[i].y);
}
}
__device__ int minAreaFunction(int *ptr)
{
int minArea=INT_MAX;
for(int i=0;i<(maxWIDTH*maxWIDTH);i++)
{
if(ptr[i]<minArea && ptr[i]!=0)
{
minArea=ptr[i];
}
}
return minArea;
}
__device__ void sortedArea(int *area,int *currentPoint)
{
int totalpoint=*currentPoint;
int i,j,temp;
for(i=0;i<totalpoint-1;i++)
{
for(j=0;j<totalpoint-i-1;j++)
{
if(area[j]>area[j+1])
{
temp=area[j+1];
area[j+1]=area[j];
area[j]=temp;
}
}
}
}
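// getArea: for each of the *currentPoint candidate points, computes
// side = max(x + 1 - leftEdge, y + 1 - bottomEdge) and stores side*side, i.e. the area of the
// square spanned from the candidate left/bottom edges out to that point. Caller frees the array.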
__device__ int * getArea(Point2D *ptr,int len,int leftEdge,int bottomEdge,int *currentPoint)
{
int totalpoint=*currentPoint;
int *area=(int*)malloc(totalpoint * sizeof(int));
int i=0;
int ar=0;
for(i=0;i<totalpoint;i++)
{
if((ptr[i].x+1-leftEdge) > (ptr[i].y+1-bottomEdge))
{
ar=(int)(ptr[i].x+1-leftEdge)*((ptr[i].x+1-leftEdge));
}
else
{
ar=(int)(ptr[i].y+1-bottomEdge)*((ptr[i].y+1-bottomEdge));
}
area[i]=ar;
}
return area;
}
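// getAboveRightPoints: returns the points present in both sortedX[startx..len) and
// sortedY[starty..len), in effect the points on or to the upper-right of the candidate corner,
// with duplicates removed; the count is written back through currentPoint.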
__device__ Point2D* getAboveRightPoints(int startx,int starty,Point2D* ptrx,Point2D* ptry,int len,int *currentPoint)
{
int k=0;
int i,j;
int count=0;
int totalpoint=*currentPoint;
Point2D* temp;
int check=0;
for(i=startx;i<len;i++)
{
for(j=starty;j<len;j++)
{
if(ptrx[i].x==ptry[j].x && ptrx[i].y==ptry[j].y)
{
count++;
break;
}
}
}
totalpoint=count;
temp=(Point2D*)malloc(totalpoint*sizeof(Point2D));
for(j=starty;j<len;j++)
{
for(i=startx;i<len;i++)
{
if(ptry[j].x==ptrx[i].x && ptry[j].y==ptrx[i].y)
{
check=0;
for(int m=0;m<k;m++)
if(ptry[j].x==temp[m].x && ptry[j].y==temp[m].y)
check=1;
if(check==0)
{
temp[k].x=ptry[j].x;
temp[k].y=ptry[j].y;
k++;
break;
}
}
}
}
*currentPoint=k;
return temp;
}
__device__ void sortingbyAxis(Point2D *points,Point2D *sorted,int n,int x)
{
int i,j;
Point2D temp;
for(i=0;i<n;i++)
{
sorted[i].x=points[i].x;
sorted[i].y=points[i].y;
}
//sort by x co-ordinate
if(x==1)
{
for(i=0;i<n-1;i++)
{
for(j=0;j<n-i-1;j++)
{
if(sorted[j].x>sorted[j+1].x)
{
temp.x=sorted[j].x;
temp.y=sorted[j].y;
sorted[j].x=sorted[j+1].x;
sorted[j].y=sorted[j+1].y;
sorted[j+1].x=temp.x;
sorted[j+1].y=temp.y;
}
}
}
}
//sort by y co-ordinate
else
{
for(i=0;i<n-1;i++)
{
for(j=0;j<n-i-1;j++)
{
if(sorted[j].y>sorted[j+1].y)
{
temp.x=sorted[j].x;
temp.y=sorted[j].y;
sorted[j].x=sorted[j+1].x;
sorted[j].y=sorted[j+1].y;
sorted[j+1].x=temp.x;
sorted[j+1].y=temp.y;
}
}
}
}
}
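// k_bounding_algorithm: one thread per (i, j) pair, where sortedX[i] gives the candidate left
// edge and sortedY[j] the candidate bottom edge. If at least k points remain above/right of
// that corner, the k-th smallest enclosing-square area is folded into *finalArea via atomicMin.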
__global__ void k_bounding_algorithm(Point2D * points,int n,int k,int *finalArea)
{
int threadId = blockDim.x*blockIdx.x + threadIdx.x;
if(threadId < ((n-k+1)*(n-k+1)))
{
int i=threadId / (n-k+1);
int j=threadId %(n-k+1);
Point2D bottomPoint;
Point2D leftPoint;
Point2D *Rpoints=NULL;
int *area;
int totalpoints=0;
*finalArea=INT_MAX;
__syncthreads();
leftPoint=sortedX[i];
bottomPoint=sortedY[j];
if(leftPoint.x <= bottomPoint.x && leftPoint.y >= bottomPoint.y)
{
int leftEdge=(int)(leftPoint.x-1);
int bottomEdge=(int)(bottomPoint.y-1);
Rpoints=getAboveRightPoints(i,j,sortedX,sortedY,n,&totalpoints);
if(totalpoints>=k)
{
area=getArea(Rpoints,n,leftEdge,bottomEdge,&totalpoints);
free(Rpoints);
sortedArea(area,&totalpoints);
atomicMin(finalArea,area[k-1]);
free(area);
}
}
}
}
int main(void)
{
cudaError_t err = cudaSuccess;
//cudaEvent_t seq_start,seq_stop;
int n=0;
int k=0;
printf("\nEnter Number of Points in plane(n = 2-%d):",maxWIDTH);
fflush(stdin);
scanf("%d",&n);
if(n<=0 || n > maxWIDTH)
{
printf("\nmaxWIDTH value is %d,Please re run and enter n value from %d to %d",maxWIDTH,2,maxWIDTH);
exit(0);
}
printf("\nEnter Number of Points inside square(k) in range from %d to %d:\n",1,n);
fflush(stdin);
scanf("%d",&k);
if(k<=0 || k>n)
{
printf("\n n value is %d,Please re run and enter k value from %d to %d\n",n,2,n);
exit(0);
}
int *h_minArea=NULL;
// cudaEventCreate(&seq_start);
// cudaEventCreate(&seq_stop);
Point2D *h_points = (Point2D *)malloc(n*sizeof(Point2D));
if (h_points == NULL)
{
fprintf(stderr, "Failed to allocate host vectors!\n");
exit(EXIT_FAILURE);
}
Point2D *h_sortedX = (Point2D *)malloc(maxWIDTH*sizeof(Point2D));
if (h_sortedX == NULL)
{
fprintf(stderr, "Failed to allocate sorted X host vectors!\n");
exit(EXIT_FAILURE);
}
Point2D *h_sortedY = (Point2D *)malloc(maxWIDTH*sizeof(Point2D));
if (h_sortedY == NULL)
{
fprintf(stderr, "Failed to allocate sorted Y host vectors!\n");
exit(EXIT_FAILURE);
}
getRandomInputPoints(n,h_points);
printf("\nPoints:\n");
printArray(h_points,n);
h_minArea = (int*)malloc(sizeof(int));
Point2D *d_points = NULL;
err = cudaMalloc((void**)&d_points,n*sizeof(Point2D));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_points,h_points,n*sizeof(Point2D),cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
int *d_minArea = NULL;
err = cudaMalloc((void**)&d_minArea,sizeof(int));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device vector A (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_minArea,h_minArea,sizeof(int),cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Point2D *d_sortedX = NULL;
err = cudaMalloc((void**)&d_sortedX,n*sizeof(Point2D));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device sortedX (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
Point2D *d_sortedY = NULL;
err = cudaMalloc((void**)&d_sortedY,n*sizeof(Point2D));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to allocate device sortedY (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_sortedX,h_points,n*sizeof(Point2D),cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(d_sortedY,h_points,n*sizeof(Point2D),cudaMemcpyHostToDevice);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// cudaEventRecord(seq_start);
qsort<<<1,1,0>>>(d_sortedX,0,n-1,1);
qsort<<<1,1,0>>>(d_sortedY,0,n-1,0);
err = cudaMemcpy(h_sortedX,d_sortedX,n*sizeof(Point2D),cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_sortedY,d_sortedY,n*sizeof(Point2D),cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy minArea from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpyToSymbol(sortedX,h_sortedX,n*sizeof(Point2D));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy sortedX from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpyToSymbol(sortedY,h_sortedY,n*sizeof(Point2D));
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy sortedY from host to device (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaDeviceSynchronize();
int b=(n-k+1);
int blocksPerGrid = ((b*b)/1024)+1;
dim3 threadsPerBlock (1024,1,1);
printf("CUDA kernel launch with %d blocks of 1024 threads\n", blocksPerGrid);
// cudaEventRecord(seq_start);
k_bounding_algorithm<<<blocksPerGrid,threadsPerBlock>>>(d_points,n,k,d_minArea);
// cudaEventRecord(seq_stop);
// cudaEventSynchronize(seq_stop);
//float seq_milliseconds = 0;
//err = cudaEventElapsedTime(&seq_milliseconds, seq_start, seq_stop);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to calculate elapse time (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
// printf("The elapsed time is %.2f ms\n", seq_milliseconds);
err = cudaGetLastError();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to launch vectorAdd kernel (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaMemcpy(h_minArea,d_minArea,sizeof(int),cudaMemcpyDeviceToHost);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to copy vector A from device to host (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
printf("\nMinimum Area of Square containing %d points out of %d is : %d\n",k,n,*h_minArea);
err = cudaFree(d_minArea);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_minArea (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
err = cudaFree(d_points);
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to free d_points (error code %s)!\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
free(h_points);
err = cudaDeviceReset();
if (err != cudaSuccess)
{
fprintf(stderr, "Failed to deinitialize the device! error=%s\n", cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
cudaFree(d_sortedX);
cudaFree(d_sortedY);
return 0;
}
|
b0b321022b5da7232d0c9feb75d00cc419a790dc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <io.h>
#include <string.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#include "C:\Program Files\boost_1_62_0\boost\asio\buffer.hpp"
#include "C:\Program Files\boost_1_62_0\boost\asio.hpp"
#include "C:\Program Files\boost_1_62_0\boost\system\error_code.hpp"
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
using namespace boost::asio::ip;
std::string stock;
std::fstream temp_stream;
const int window_height = 720;
const int window_width = 1280;
int days_read = 0;
float max_close_price = 0.0;
float min_close_price = 9999999999.99;
float max_result = -999999999.99, min_result = 999999999.99;
class data_t
{
public:
float open;
float high;
float low;
float close;
float volume;
float adj_close;
char date[12];
};
std::vector<data_t> prices;
float * results;
GLuint program;
GLint attribute_coord2d;
void print_text()
{
glColor3f(0, 0, 0);
glRasterPos2f(-0.5, -0.5);
int i;
char price_buffer[10];
char idx_buffer[64];
char label[64];
sprintf(label, "PRICE OF %s", stock.c_str());
glRasterPos2f(-0.145,0.9);
for (size_t j = 0; j < strlen(label); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, label[j]);
}
glColor3f(1.0, 0, 0);
glRasterPos2f(-0.16, -0.1);
char label2[64];
sprintf(label2, "ON BALANCE VOLUME OF %s", stock.c_str());
for (size_t j = 0; j < strlen(label2); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, label2[j]);
}
for (float i = 0; i < 7; i++)
{
float p = min_close_price + (max_close_price - min_close_price)*i / 6.0;
sprintf(price_buffer, "%.2f", p);
glColor3f(0, 0, 0);
glRasterPos2f(-0.9, 0.2 + 0.1 * i);
for (size_t j = 0; j < strlen(price_buffer); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, price_buffer[j]);
}
p = min_result + (max_result - min_result)*i / 6.0;
sprintf(idx_buffer, "%.2f", p);
glColor3f(1.0, 0, 0);
glRasterPos2f(-0.98, -0.8 + 0.1 * i);
for (size_t j = 0; j < strlen(idx_buffer); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, idx_buffer[j]);
}
}
int day, idx = 0;
for (float f = -0.8; f <= 1.0; f += 0.4)
{
day = (days_read-1) / 4 * idx++;
glColor3f(0, 0, 0);
glRasterPos2f(f-0.05, 0.12);
for (size_t j = 0; j < strlen(prices[day].date); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, prices[day].date[j]);
}
glColor3f(1.0, 0, 0);
glRasterPos2f(f - 0.05, -0.88);
for (size_t j = 0; j < strlen(prices[day].date); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, prices[day].date[j]);
}
}
}
struct Point
{
float x, y;
unsigned char r, g, b, a;
};
std::vector< Point > points;
std::vector< Point > upper_coor;
std::vector <Point> result_points;
std::vector <Point> lower_coor;
void display(void)
{
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glOrtho(-50, 50, -50, 50, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// draw
glColor3ub(255, 255, 255);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &points[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &points[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, points.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &upper_coor[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &upper_coor[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, upper_coor.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &result_points[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &result_points[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, result_points.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &lower_coor[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &lower_coor[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, lower_coor.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
print_text();
glFlush();
glutSwapBuffers();
}
void reshape(int w, int h)
{
glViewport(0, 0, w, h);
}
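// GPU_computation: On-Balance Volume sign step, one thread per trading day (offset selects the
// current 250-day chunk): +volume if the close rose versus the previous day, -volume if it
// fell, 0 otherwise. The running sum that turns these into OBV is done on the host afterwards.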
__global__ void GPU_computation(data_t * prices, float * results, int offset)
{
int i = threadIdx.x + offset;
if (i == 0) results[i] = 0;
else
{
if (prices[i].close > prices[i - 1].close) results[i] = prices[i].volume;
else if (prices[i].close < prices[i - 1].close) results[i] = -prices[i].volume;
else results[i] = 0;
}
}
hipError_t generate_cuda(std::vector<data_t> &prices, float * results);
void read_from_internet()
{
std::cout << "Enter the symbol of a stock in capital letters" << std::endl;
std::cout << "Enter the starting and ending data in the following format:" << std::endl;
std::cout << "fasdf " << std::endl;
std::cout << "Invalid input has undefined behavior" << std::endl;
std::cin >> stock;
int start_month, start_date, start_year, end_month, end_date, end_year;
scanf("%d %d %d %d %d %d", &start_month, &start_date, &start_year, &end_month, &end_date, &end_year);
std::cout << start_month << " " << start_date <<" " << start_year <<" " << end_month << " " << end_date<<" " << end_year << std::endl;
stock[stock.length()] = 0;
start_month--; end_month--;
//int start_month = 1, start_date = 13, start_year = 2016, end_month = 10, end_date = 13, end_year = 2016;
boost::system::error_code error;
boost::asio::io_service io_service;
tcp::socket socket(io_service);
tcp::resolver resolver(io_service);
tcp::resolver::query query("chart.finance.yahoo.com", "http");
tcp::resolver::iterator i = resolver.resolve(query);
boost::asio::connect(socket, i);
boost::asio::streambuf request, response;
std::ostream request_stream(&request);
request_stream << "GET /table.csv?s=" << stock << "&a=" << start_month << "&b="
<< start_date << "&c=" << start_year << "&d=" << end_month << "&e="
<< end_date << "&f=" << "2016" << "&g=d&ignore=.csv HTTP/1.1\r\nHost: chart.finance.yahoo.com\r\n\r\n";
boost::asio::write(socket, request);
std::istream response_stream(&response);
temp_stream.open("temp.txt", std::fstream::in | std::fstream::out | std::fstream::trunc);
char buffer[4096];
char start_sign[8];
int start_flag = 0;
sprintf(start_sign, "%d-", end_year);
char temp[10];
temp[8] = 0;
for (int k = 0; k < 1000000; k++)
{
size_t x = boost::asio::read_until(socket, response, "\n", error);
response_stream.getline(buffer, x);
std::cout << buffer << std::endl;
if (strlen(buffer) < 5 && start_flag) break;
if (!start_flag) {
memcpy(temp, buffer, 5);
temp[5] = 0;
if (strcmp(temp, start_sign) == 0) {
start_flag = 1;
temp_stream << buffer << "\n";
days_read++;
std::cout << buffer << std::endl;
}
}
//else std::cout << buffer << std::endl;
else {
temp_stream << buffer << "\n";
days_read++;
std::cout << buffer << std::endl;
}
//memcpy(temp, buffer + 2, 8);
}
temp_stream.seekp(0);
}
void read_prices(std::vector<data_t> & prices)
{
data_t * temp = new data_t();
std::string str;
const char * ptr = NULL;
int i = 0;
while (std::getline(temp_stream, str))
{
ptr = str.c_str();
memcpy(temp->date, ptr, 10);
temp->date[10] = 0;
ptr = strchr(ptr, ',') + 1;
temp->open = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->high = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->low = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->close = atof(ptr);
if (max_close_price < temp->close) max_close_price = temp->close;
if (min_close_price > temp->close) min_close_price = temp->close;
ptr = strchr(ptr, ',') + 1;
temp->volume = atoi(ptr);
ptr = strchr(ptr, ',') + 1;
temp->adj_close = atof(ptr);
prices.push_back(*temp);
}
delete temp;
temp_stream.close();
//fb.close();
temp = NULL;
return;
}
int glut_window(int argc, char ** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Random Points");
glutDisplayFunc(display);
glutReshapeFunc(reshape);
// populate points
for (size_t i = 0; i < days_read; ++i)
{
Point pt;
pt.x = (float) i / (float) days_read * 1.6 - 0.8;
pt.y = (prices[i].close - min_close_price) / (max_close_price - min_close_price) * 0.6 + 0.2;
pt.r = 0;
pt.g = 0;
pt.b = 0;
pt.a = 255;
points.push_back(pt);
}
for (int l = 0; l < 1; l++) {
Point pt1, pt2, pt3;
pt1.x = -0.85;
pt1.y = 0.85;
pt1.r = 0;
pt1.g = 0;
pt1.b = 0;
pt1.a = 255;
pt2.x = -0.85;
pt2.y = 0.15;
pt2.r = 0;
pt2.g = 0;
pt2.b = 0;
pt2.a = 255;
pt3.x = 0.85;
pt3.y = 0.15;
pt3.r = 0;
pt3.g = 0;
pt3.b = 0;
pt3.a = 255;
upper_coor.push_back(pt1);
upper_coor.push_back(pt2);
upper_coor.push_back(pt3);
}
for (size_t i = 0; i < days_read; ++i)
{
Point pt;
pt.x = (float)i / (float)days_read * 1.6 - 0.8;
pt.y = (results[i] - min_result) / (max_result - min_result) * 0.6 - 0.8;
pt.r = 255;
pt.g = 0;
pt.b = 0;
pt.a = 255;
result_points.push_back(pt);
}
for (int l = 0; l < 1; l++) {
Point pt1, pt2, pt3;
pt1.x = -0.85;
pt1.y = -0.15;
pt1.r = 255;
pt1.g = 0;
pt1.b = 0;
pt1.a = 255;
pt2.x = -0.85;
pt2.y = -0.85;
pt2.r = 255;
pt2.g = 0;
pt2.b = 0;
pt2.a = 255;
pt3.x = 0.85;
pt3.y = -0.85;
pt3.r = 255;
pt3.g = 0;
pt3.b = 0;
pt3.a = 255;
lower_coor.push_back(pt1);
lower_coor.push_back(pt2);
lower_coor.push_back(pt3);
}
glutMainLoop();
return 0;
}
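// process_results: accumulates the per-day signed volumes from the GPU into the cumulative
// On-Balance Volume series and records its min/max so the OpenGL plot can be scaled.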
void process_results()
{
// for OBV
for (int i = 1; i < prices.size(); i++) {
results[i] += results[i - 1];
if (results[i] > max_result) max_result = results[i];
if (results[i] < min_result) min_result = results[i];
}
}
int main(int argc, char** argv)
{
read_from_internet();
read_prices(prices);
std::reverse(prices.begin(), prices.end());
results = (float*) calloc(prices.size(), sizeof(float));
generate_cuda(prices, results);
std::cout << "the size of the vector is" << prices.size() << std::endl;
process_results();
std::cout << results[5] << " " << results[120] << " " << std::endl;
int d;
printf("finished\n");
glut_window(argc, argv);
scanf("%d", &d);
return 0;
}
// Helper function that uses the GPU to compute the per-day OBV sign values in parallel.
hipError_t generate_cuda(std::vector<data_t> &prices, float * results)
{
data_t * price_array = NULL;
size_t days = prices.size();
float * dev_results = NULL;
hipMalloc((void**)&price_array, days * sizeof(data_t));
hipMalloc((void**)&dev_results, days*sizeof(float));
hipMemcpy(price_array, &(prices[0]), days * sizeof(data_t), hipMemcpyHostToDevice);
hipError_t cudaStatus;
for (size_t i = 0; i < days; i += 250)
{
if (days - i >= 250)
{
GPU_computation << <1, 250 >> >(price_array, dev_results, (int)i);
hipDeviceSynchronize();
}
else
{
GPU_computation << <1, days-i >> >(price_array, dev_results, (int)i);
hipDeviceSynchronize();
}
}
hipMemcpy(results, dev_results, sizeof(float)*days , hipMemcpyDeviceToHost);
hipFree(dev_results);
hipFree(price_array);
return hipSuccess;
}
| b0b321022b5da7232d0c9feb75d00cc419a790dc.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <io.h>
#include <string.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#include "C:\Program Files\boost_1_62_0\boost\asio\buffer.hpp"
#include "C:\Program Files\boost_1_62_0\boost\asio.hpp"
#include "C:\Program Files\boost_1_62_0\boost\system\error_code.hpp"
#include "Dependencies\glew\glew.h"
#include "Dependencies\freeglut\freeglut.h"
using namespace boost::asio::ip;
std::string stock;
std::fstream temp_stream;
const int window_height = 720;
const int window_width = 1280;
int days_read = 0;
float max_close_price = 0.0;
float min_close_price = 9999999999.99;
float max_result = -999999999.99, min_result = 999999999.99;
class data_t
{
public:
float open;
float high;
float low;
float close;
float volume;
float adj_close;
char date[12];
};
std::vector<data_t> prices;
float * results;
GLuint program;
GLint attribute_coord2d;
void print_text()
{
glColor3f(0, 0, 0);
glRasterPos2f(-0.5, -0.5);
int i;
char price_buffer[10];
char idx_buffer[64];
char label[64];
sprintf(label, "PRICE OF %s", stock.c_str());
glRasterPos2f(-0.145,0.9);
for (size_t j = 0; j < strlen(label); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, label[j]);
}
glColor3f(1.0, 0, 0);
glRasterPos2f(-0.16, -0.1);
char label2[64];
sprintf(label2, "ON BALANCE VOLUME OF %s", stock.c_str());
for (size_t j = 0; j < strlen(label2); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_18, label2[j]);
}
for (float i = 0; i < 7; i++)
{
float p = min_close_price + (max_close_price - min_close_price)*i / 6.0;
sprintf(price_buffer, "%.2f", p);
glColor3f(0, 0, 0);
glRasterPos2f(-0.9, 0.2 + 0.1 * i);
for (size_t j = 0; j < strlen(price_buffer); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, price_buffer[j]);
}
p = min_result + (max_result - min_result)*i / 6.0;
sprintf(idx_buffer, "%.2f", p);
glColor3f(1.0, 0, 0);
glRasterPos2f(-0.98, -0.8 + 0.1 * i);
for (size_t j = 0; j < strlen(idx_buffer); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, idx_buffer[j]);
}
}
int day, idx = 0;
for (float f = -0.8; f <= 1.0; f += 0.4)
{
day = (days_read-1) / 4 * idx++;
glColor3f(0, 0, 0);
glRasterPos2f(f-0.05, 0.12);
for (size_t j = 0; j < strlen(prices[day].date); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, prices[day].date[j]);
}
glColor3f(1.0, 0, 0);
glRasterPos2f(f - 0.05, -0.88);
for (size_t j = 0; j < strlen(prices[day].date); j++)
{
glutBitmapCharacter(GLUT_BITMAP_HELVETICA_10, prices[day].date[j]);
}
}
}
struct Point
{
float x, y;
unsigned char r, g, b, a;
};
std::vector< Point > points;
std::vector< Point > upper_coor;
std::vector <Point> result_points;
std::vector <Point> lower_coor;
void display(void)
{
glClearColor(1.0, 1.0, 1.0, 1.0);
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
glMatrixMode(GL_PROJECTION);
glLoadIdentity();
//glOrtho(-50, 50, -50, 50, -1, 1);
glMatrixMode(GL_MODELVIEW);
glLoadIdentity();
// draw
glColor3ub(255, 255, 255);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &points[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &points[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, points.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &upper_coor[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &upper_coor[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, upper_coor.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &result_points[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &result_points[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, result_points.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
glEnableClientState(GL_VERTEX_ARRAY);
glEnableClientState(GL_COLOR_ARRAY);
glVertexPointer(2, GL_FLOAT, sizeof(Point), &lower_coor[0].x);
glColorPointer(4, GL_UNSIGNED_BYTE, sizeof(Point), &lower_coor[0].r);
glPointSize(3.0);
glDrawArrays(GL_LINE_STRIP, 0, lower_coor.size());
glDisableClientState(GL_VERTEX_ARRAY);
glDisableClientState(GL_COLOR_ARRAY);
print_text();
glFlush();
glutSwapBuffers();
}
void reshape(int w, int h)
{
glViewport(0, 0, w, h);
}
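// GPU_computation: On-Balance Volume sign step, one thread per trading day (offset selects the
// current 250-day chunk): +volume if the close rose versus the previous day, -volume if it
// fell, 0 otherwise. The running sum that turns these into OBV is done on the host afterwards.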
__global__ void GPU_computation(data_t * prices, float * results, int offset)
{
int i = threadIdx.x + offset;
if (i == 0) results[i] = 0;
else
{
if (prices[i].close > prices[i - 1].close) results[i] = prices[i].volume;
else if (prices[i].close < prices[i - 1].close) results[i] = -prices[i].volume;
else results[i] = 0;
}
}
cudaError_t generate_cuda(std::vector<data_t> &prices, float * results);
void read_from_internet()
{
std::cout << "Enter the symbol of a stock in capital letters" << std::endl;
std::cout << "Enter the starting and ending data in the following format:" << std::endl;
std::cout << "fasdf " << std::endl;
std::cout << "Invalid input has undefined behavior" << std::endl;
std::cin >> stock;
int start_month, start_date, start_year, end_month, end_date, end_year;
scanf("%d %d %d %d %d %d", &start_month, &start_date, &start_year, &end_month, &end_date, &end_year);
std::cout << start_month << " " << start_date <<" " << start_year <<" " << end_month << " " << end_date<<" " << end_year << std::endl;
stock[stock.length()] = 0;
start_month--; end_month--;
//int start_month = 1, start_date = 13, start_year = 2016, end_month = 10, end_date = 13, end_year = 2016;
boost::system::error_code error;
boost::asio::io_service io_service;
tcp::socket socket(io_service);
tcp::resolver resolver(io_service);
tcp::resolver::query query("chart.finance.yahoo.com", "http");
tcp::resolver::iterator i = resolver.resolve(query);
boost::asio::connect(socket, i);
boost::asio::streambuf request, response;
std::ostream request_stream(&request);
request_stream << "GET /table.csv?s=" << stock << "&a=" << start_month << "&b="
<< start_date << "&c=" << start_year << "&d=" << end_month << "&e="
<< end_date << "&f=" << "2016" << "&g=d&ignore=.csv HTTP/1.1\r\nHost: chart.finance.yahoo.com\r\n\r\n";
boost::asio::write(socket, request);
std::istream response_stream(&response);
temp_stream.open("temp.txt", std::fstream::in | std::fstream::out | std::fstream::trunc);
char buffer[4096];
char start_sign[8];
int start_flag = 0;
sprintf(start_sign, "%d-", end_year);
char temp[10];
temp[8] = 0;
for (int k = 0; k < 1000000; k++)
{
size_t x = boost::asio::read_until(socket, response, "\n", error);
response_stream.getline(buffer, x);
std::cout << buffer << std::endl;
if (strlen(buffer) < 5 && start_flag) break;
if (!start_flag) {
memcpy(temp, buffer, 5);
temp[5] = 0;
if (strcmp(temp, start_sign) == 0) {
start_flag = 1;
temp_stream << buffer << "\n";
days_read++;
std::cout << buffer << std::endl;
}
}
//else std::cout << buffer << std::endl;
else {
temp_stream << buffer << "\n";
days_read++;
std::cout << buffer << std::endl;
}
//memcpy(temp, buffer + 2, 8);
}
temp_stream.seekp(0);
}
void read_prices(std::vector<data_t> & prices)
{
data_t * temp = new data_t();
std::string str;
const char * ptr = NULL;
int i = 0;
while (std::getline(temp_stream, str))
{
ptr = str.c_str();
memcpy(temp->date, ptr, 10);
temp->date[10] = 0;
ptr = strchr(ptr, ',') + 1;
temp->open = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->high = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->low = atof(ptr);
ptr = strchr(ptr, ',') + 1;
temp->close = atof(ptr);
if (max_close_price < temp->close) max_close_price = temp->close;
if (min_close_price > temp->close) min_close_price = temp->close;
ptr = strchr(ptr, ',') + 1;
temp->volume = atoi(ptr);
ptr = strchr(ptr, ',') + 1;
temp->adj_close = atof(ptr);
prices.push_back(*temp);
}
delete temp;
temp_stream.close();
//fb.close();
temp = NULL;
return;
}
int glut_window(int argc, char ** argv)
{
glutInit(&argc, argv);
glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH | GLUT_DOUBLE);
glutInitWindowSize(window_width, window_height);
glutCreateWindow("Random Points");
glutDisplayFunc(display);
glutReshapeFunc(reshape);
// populate points
for (size_t i = 0; i < days_read; ++i)
{
Point pt;
pt.x = (float) i / (float) days_read * 1.6 - 0.8;
pt.y = (prices[i].close - min_close_price) / (max_close_price - min_close_price) * 0.6 + 0.2;
pt.r = 0;
pt.g = 0;
pt.b = 0;
pt.a = 255;
points.push_back(pt);
}
for (int l = 0; l < 1; l++) {
Point pt1, pt2, pt3;
pt1.x = -0.85;
pt1.y = 0.85;
pt1.r = 0;
pt1.g = 0;
pt1.b = 0;
pt1.a = 255;
pt2.x = -0.85;
pt2.y = 0.15;
pt2.r = 0;
pt2.g = 0;
pt2.b = 0;
pt2.a = 255;
pt3.x = 0.85;
pt3.y = 0.15;
pt3.r = 0;
pt3.g = 0;
pt3.b = 0;
pt3.a = 255;
upper_coor.push_back(pt1);
upper_coor.push_back(pt2);
upper_coor.push_back(pt3);
}
for (size_t i = 0; i < days_read; ++i)
{
Point pt;
pt.x = (float)i / (float)days_read * 1.6 - 0.8;
pt.y = (results[i] - min_result) / (max_result - min_result) * 0.6 - 0.8;
pt.r = 255;
pt.g = 0;
pt.b = 0;
pt.a = 255;
result_points.push_back(pt);
}
for (int l = 0; l < 1; l++) {
Point pt1, pt2, pt3;
pt1.x = -0.85;
pt1.y = -0.15;
pt1.r = 255;
pt1.g = 0;
pt1.b = 0;
pt1.a = 255;
pt2.x = -0.85;
pt2.y = -0.85;
pt2.r = 255;
pt2.g = 0;
pt2.b = 0;
pt2.a = 255;
pt3.x = 0.85;
pt3.y = -0.85;
pt3.r = 255;
pt3.g = 0;
pt3.b = 0;
pt3.a = 255;
lower_coor.push_back(pt1);
lower_coor.push_back(pt2);
lower_coor.push_back(pt3);
}
glutMainLoop();
return 0;
}
void process_results()
{
// for OBV
for (int i = 1; i < prices.size(); i++) {
results[i] += results[i - 1];
if (results[i] > max_result) max_result = results[i];
if (results[i] < min_result) min_result = results[i];
}
}
int main(int argc, char** argv)
{
read_from_internet();
read_prices(prices);
std::reverse(prices.begin(), prices.end());
results = (float*) calloc(prices.size(), sizeof(float));
generate_cuda(prices, results);
std::cout << "the size of the vector is" << prices.size() << std::endl;
process_results();
std::cout << results[5] << " " << results[120] << " " << std::endl;
int d;
printf("finished\n");
glut_window(argc, argv);
scanf("%d", &d);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
cudaError_t generate_cuda(std::vector<data_t> &prices, float * results)
{
data_t * price_array = NULL;
size_t days = prices.size();
float * dev_results = NULL;
cudaMalloc((void**)&price_array, days * sizeof(data_t));
cudaMalloc((void**)&dev_results, days*sizeof(float));
cudaMemcpy(price_array, &(prices[0]), days * sizeof(data_t), cudaMemcpyHostToDevice);
cudaError_t cudaStatus;
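// Launch the kernel in chunks of at most 250 threads (a single block per launch);
// each thread computes the result for one trading day, starting at offset i.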
for (size_t i = 0; i < days; i += 250)
{
if (days - i >= 250)
{
GPU_computation<<<1, 250>>>(price_array, dev_results, (int)i);
cudaDeviceSynchronize();
}
else
{
GPU_computation<<<1, days-i>>>(price_array, dev_results, (int)i);
cudaDeviceSynchronize();
}
}
cudaMemcpy(results, dev_results, sizeof(float)*days , cudaMemcpyDeviceToHost);
cudaFree(dev_results);
cudaFree(price_array);
return cudaSuccess;
}
|
ef9645c7222e08b71ec5fe94e7dbe78b24006f36.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaDefs.h>
#include <ctime>
#include <cmath>
#include <rng.h>
#include <benchmark.h>
//WARNING!!! Do not change TPB and NO_FORCES for this demo !!!
constexpr unsigned int TPB = 128;
constexpr unsigned int NO_FORCES = 256;
//constexpr unsigned int NO_RAIN_DROPS = 1 << 20;
constexpr unsigned int NO_RAIN_DROPS = 50;
constexpr unsigned int MEM_BLOCKS_PER_THREAD_BLOCK = 8;
hipError_t error = hipSuccess;
hipDeviceProp_t deviceProp = hipDeviceProp_t();
using namespace std;
template<typename T>
inline T sqr(const T val) {
return val * val;
}
inline float vecLen(const float3 &vec) {
return sqrt(sqr(vec.x) + sqr(vec.y) + sqr(vec.z));
}
inline void normalize(float3 &vec) {
float len = vecLen(vec);
vec.x /= len;
vec.y /= len;
vec.z /= len;
}
float3 *createData(const unsigned int length, float min, float max) {
// float3 *data = new float3[length];
float3 *data = static_cast<float3 *>(::operator new(sizeof(float3) * length));
for (size_t i = 0; i < length; i++) {
data[i] = make_float3(rng(min, max), rng(min, max), rng(min, max));
// normalize(data[i]);
}
return data;
}
void printData(const float3 *data, const unsigned int length) {
if (data == 0) return;
const float3 *ptr = data;
for (unsigned int i = 0; i < length; i++, ptr++) {
printf("%5.2f %5.2f %5.2f ", ptr->x, ptr->y, ptr->z);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Sums the forces to get the final one using parallel reduction.
/// WARNING!!! The method was written to meet input requirements of our example, i.e. 128 threads and 256 forces </summary>
/// <param name="dForces"> The forces. </param>
/// <param name="noForces"> The number of forces. </param>
/// <param name="dFinalForce"> [in,out] If non-null, the final force. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
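// Reduction outline for TPB = 128 threads and NO_FORCES = 256 forces:
// each thread first sums dForces[tid] and dForces[tid + 128] into shared memory,
// then the active thread count is halved every step (128 -> 64 -> 32 -> ... -> 1).
// From 32 threads downwards all active threads sit in one warp, so volatile
// shared-memory accesses are used instead of __syncthreads().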
__global__ void reduce(const float3 *__restrict__ dForces, const unsigned int noForces, float3 *__restrict__ dFinalForce) {
__shared__ float3 sForces[TPB]; //SEE THE WARNING MESSAGE !!!
unsigned int tid = threadIdx.x;
unsigned int next = TPB; //SEE THE WARNING MESSAGE !!!
float3 *src = &sForces[tid];
float3 *src2 = const_cast<float3 *>(&dForces[tid + next]);
*src = dForces[tid];
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
next >>= 1; // divide by 2, 128 -> 64
if (tid >= next)
return;
// 64 threads
src2 = src + next;
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
next >>= 1; // divide by 2, 64 -> 32
if (tid >= next)
return;
// 32 threads
src2 = src + next;
volatile float3 *vSrc = &sForces[tid];
volatile float3 *vSrc2 = src2;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2, 32 -> 16
if (tid >= next)
return;
// 16 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 8 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 4 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 2 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 1 thread
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
// next >>= 1; // divide by 2
// Good practice: always ensure that only one thread writes the final value
if (tid == 0) {
dFinalForce->x = vSrc->x;
dFinalForce->y = vSrc->y;
dFinalForce->z = vSrc->z;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Adds the FinalForce to every Rain drops position. </summary>
/// <param name="dFinalForce"> The final force. </param>
/// <param name="noRainDrops"> The number of rain drops. </param>
/// <param name="dRainDrops"> [in,out] If non-null, the rain drops positions. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
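// Each thread updates MEM_BLOCKS_PER_THREAD_BLOCK (8) drops spaced blockDim.x apart,
// so one thread block covers a contiguous range of 8 * TPB drop positions.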
__global__ void add(const float3 *__restrict__ dFinalForce, const unsigned int noRainDrops, float3 *__restrict__ dRainDrops) {
const float3 finalForce = *dFinalForce;
unsigned int offset = blockDim.x;
unsigned int index = MEM_BLOCKS_PER_THREAD_BLOCK * blockIdx.x * offset + threadIdx.x;
float3 *ptr = &dRainDrops[index];
#pragma unroll MEM_BLOCKS_PER_THREAD_BLOCK
for (unsigned int i = 0; i < MEM_BLOCKS_PER_THREAD_BLOCK; i++) {
if (index >= noRainDrops)
return;
ptr->x += finalForce.x;
ptr->y += finalForce.y;
ptr->z += finalForce.z;
ptr += offset;
index += offset;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
float3 *hForces = createData(NO_FORCES, 0, 1);
float3 *hDrops = createData(NO_RAIN_DROPS, 0, 10000);
float3 *dForces = nullptr;
float3 *dDrops = nullptr;
float3 *dFinalForce = nullptr;
checkCudaErrors(hipMalloc((void **) &dForces, NO_FORCES * sizeof(float3)));
checkCudaErrors(hipMemcpy(dForces, hForces, NO_FORCES * sizeof(float3), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &dDrops, NO_RAIN_DROPS * sizeof(float3)));
checkCudaErrors(hipMemcpy(dDrops, hDrops, NO_RAIN_DROPS * sizeof(float3), hipMemcpyHostToDevice));
checkCudaErrors(hipMalloc((void **) &dFinalForce, sizeof(float3)));
KernelSetting ksReduce;
ksReduce.dimGrid = dim3(1, 1, 1);
ksReduce.dimBlock = dim3(TPB, 1, 1);
KernelSetting ksAdd;
ksAdd.dimGrid = dim3(getNumberOfParts(NO_RAIN_DROPS, TPB * MEM_BLOCKS_PER_THREAD_BLOCK), 1, 1);
ksAdd.dimBlock = dim3(TPB, 1, 1);
for (unsigned int i = 0; i < 1000; i++) {
hipLaunchKernelGGL(( reduce), dim3(ksReduce.dimGrid), dim3(ksReduce.dimBlock), 0, 0, dForces, NO_FORCES, dFinalForce);
hipLaunchKernelGGL(( add), dim3(ksAdd.dimGrid), dim3(ksAdd.dimBlock), 0, 0, dFinalForce, NO_RAIN_DROPS, dDrops);
}
checkDeviceMatrix<float>((float *) dFinalForce, sizeof(float3), 1, 3, "%5.2f ", "Final force");
checkDeviceMatrix<float>((float *) dDrops, sizeof(float3), NO_RAIN_DROPS, 3, "%5.2f ", "Final Rain Drops");
SAFE_DELETE_ARRAY(hForces);
SAFE_DELETE_ARRAY(hDrops);
SAFE_DELETE_CUDA(dForces);
SAFE_DELETE_CUDA(dDrops);
SAFE_DELETE_CUDA(dFinalForce);
}
| ef9645c7222e08b71ec5fe94e7dbe78b24006f36.cu | #include <cudaDefs.h>
#include <ctime>
#include <cmath>
#include <rng.h>
#include <benchmark.h>
//WARNING!!! Do not change TPB and NO_FORCES for this demo !!!
constexpr unsigned int TPB = 128;
constexpr unsigned int NO_FORCES = 256;
//constexpr unsigned int NO_RAIN_DROPS = 1 << 20;
constexpr unsigned int NO_RAIN_DROPS = 50;
constexpr unsigned int MEM_BLOCKS_PER_THREAD_BLOCK = 8;
cudaError_t error = cudaSuccess;
cudaDeviceProp deviceProp = cudaDeviceProp();
using namespace std;
template<typename T>
inline T sqr(const T val) {
return val * val;
}
inline float vecLen(const float3 &vec) {
return sqrt(sqr(vec.x) + sqr(vec.y) + sqr(vec.z));
}
inline void normalize(float3 &vec) {
float len = vecLen(vec);
vec.x /= len;
vec.y /= len;
vec.z /= len;
}
float3 *createData(const unsigned int length, float min, float max) {
// float3 *data = new float3[length];
float3 *data = static_cast<float3 *>(::operator new(sizeof(float3) * length));
for (size_t i = 0; i < length; i++) {
data[i] = make_float3(rng(min, max), rng(min, max), rng(min, max));
// normalize(data[i]);
}
return data;
}
void printData(const float3 *data, const unsigned int length) {
if (data == 0) return;
const float3 *ptr = data;
for (unsigned int i = 0; i < length; i++, ptr++) {
printf("%5.2f %5.2f %5.2f ", ptr->x, ptr->y, ptr->z);
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Sums the forces to get the final one using parallel reduction.
/// WARNING!!! The method was written to meet input requirements of our example, i.e. 128 threads and 256 forces </summary>
/// <param name="dForces"> The forces. </param>
/// <param name="noForces"> The number of forces. </param>
/// <param name="dFinalForce"> [in,out] If non-null, the final force. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void reduce(const float3 *__restrict__ dForces, const unsigned int noForces, float3 *__restrict__ dFinalForce) {
__shared__ float3 sForces[TPB]; //SEE THE WARNING MESSAGE !!!
unsigned int tid = threadIdx.x;
unsigned int next = TPB; //SEE THE WARNING MESSAGE !!!
float3 *src = &sForces[tid];
float3 *src2 = const_cast<float3 *>(&dForces[tid + next]);
*src = dForces[tid];
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
next >>= 1; // divide by 2, 128 -> 64
if (tid >= next)
return;
// 64 threads
src2 = src + next;
src->x += src2->x;
src->y += src2->y;
src->z += src2->z;
__syncthreads();
next >>= 1; // divide by 2, 64 -> 32
if (tid >= next)
return;
// 32 threads
src2 = src + next;
volatile float3 *vSrc = &sForces[tid];
volatile float3 *vSrc2 = src2;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2, 32 -> 16
if (tid >= next)
return;
// 16 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 8 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 4 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 2 threads
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
next >>= 1; // divide by 2
if (tid >= next)
return;
// 1 thread
vSrc2 = vSrc + next;
vSrc->x += vSrc2->x;
vSrc->y += vSrc2->y;
vSrc->z += vSrc2->z;
// next >>= 1; // divide by 2
// Good practice: always ensure that only one thread writes the final value
if (tid == 0) {
dFinalForce->x = vSrc->x;
dFinalForce->y = vSrc->y;
dFinalForce->z = vSrc->z;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// <summary> Adds the FinalForce to every Rain drops position. </summary>
/// <param name="dFinalForce"> The final force. </param>
/// <param name="noRainDrops"> The number of rain drops. </param>
/// <param name="dRainDrops"> [in,out] If non-null, the rain drops positions. </param>
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void add(const float3 *__restrict__ dFinalForce, const unsigned int noRainDrops, float3 *__restrict__ dRainDrops) {
const float3 finalForce = *dFinalForce;
unsigned int offset = blockDim.x;
unsigned int index = MEM_BLOCKS_PER_THREAD_BLOCK * blockIdx.x * offset + threadIdx.x;
float3 *ptr = &dRainDrops[index];
#pragma unroll MEM_BLOCKS_PER_THREAD_BLOCK
for (unsigned int i = 0; i < MEM_BLOCKS_PER_THREAD_BLOCK; i++) {
if (index >= noRainDrops)
return;
ptr->x += finalForce.x;
ptr->y += finalForce.y;
ptr->z += finalForce.z;
ptr += offset;
index += offset;
}
}
int main(int argc, char *argv[]) {
initializeCUDA(deviceProp);
float3 *hForces = createData(NO_FORCES, 0, 1);
float3 *hDrops = createData(NO_RAIN_DROPS, 0, 10000);
float3 *dForces = nullptr;
float3 *dDrops = nullptr;
float3 *dFinalForce = nullptr;
checkCudaErrors(cudaMalloc((void **) &dForces, NO_FORCES * sizeof(float3)));
checkCudaErrors(cudaMemcpy(dForces, hForces, NO_FORCES * sizeof(float3), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &dDrops, NO_RAIN_DROPS * sizeof(float3)));
checkCudaErrors(cudaMemcpy(dDrops, hDrops, NO_RAIN_DROPS * sizeof(float3), cudaMemcpyHostToDevice));
checkCudaErrors(cudaMalloc((void **) &dFinalForce, sizeof(float3)));
KernelSetting ksReduce;
ksReduce.dimGrid = dim3(1, 1, 1);
ksReduce.dimBlock = dim3(TPB, 1, 1);
KernelSetting ksAdd;
ksAdd.dimGrid = dim3(getNumberOfParts(NO_RAIN_DROPS, TPB * MEM_BLOCKS_PER_THREAD_BLOCK), 1, 1);
ksAdd.dimBlock = dim3(TPB, 1, 1);
for (unsigned int i = 0; i < 1000; i++) {
reduce<<<ksReduce.dimGrid, ksReduce.dimBlock>>>(dForces, NO_FORCES, dFinalForce);
add<<<ksAdd.dimGrid, ksAdd.dimBlock>>>(dFinalForce, NO_RAIN_DROPS, dDrops);
}
checkDeviceMatrix<float>((float *) dFinalForce, sizeof(float3), 1, 3, "%5.2f ", "Final force");
checkDeviceMatrix<float>((float *) dDrops, sizeof(float3), NO_RAIN_DROPS, 3, "%5.2f ", "Final Rain Drops");
SAFE_DELETE_ARRAY(hForces);
SAFE_DELETE_ARRAY(hDrops);
SAFE_DELETE_CUDA(dForces);
SAFE_DELETE_CUDA(dDrops);
SAFE_DELETE_CUDA(dFinalForce);
}
|
ef47b05d2778c35a2e0ee45788b28233df64f2e9.hip | // !!! This is a file automatically generated by hipify!!!
#include <benchmark/benchmark.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <hip/hip_runtime.h>
#include "scope/init/init.hpp"
#include "scope/utils/utils.hpp"
#include "args.hpp"
template <typename T, int COARSINING_FACTOR = 1, int BLOCK_SIZE = 1>
__global__ void cuda_vector_add(T *in1, T *in2, T *out, size_t len) {
// todo: implement COARSINING_FACTOR
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (index < len) {
out[index] = in1[index] + in2[index];
}
}
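// Note: COARSINING_FACTOR is accepted as a template parameter but is not used yet
// (see the todo above); every thread currently handles exactly one element.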
template <typename T, int COARSINING_FACTOR = 1, int BLOCK_SIZE = 128>
static void CUDA_VECTOR_ADD(benchmark::State &state) {
if (!has_cuda) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC no CUDA device found");
return;
}
const size_t N = state.range(0);
const dim3 blockDim(BLOCK_SIZE);
const dim3 gridDim(ceil(((float) N) / blockDim.x));
if (gridDim.x >= cuda_device_prop.maxGridSize[0]) {
const auto str = fmt::format("CUDA/VECTOR_ADD/BASIC the grid dimension {} exceeds the max grid dimensions {}",
gridDim.x, cuda_device_prop.maxGridSize[0]);
state.SkipWithError(str.c_str());
return;
}
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
const auto str = fmt::format("CUDA/VECTOR_ADD/BASIC the grid dimension {} exceeds the max grid dimensions {}",
gridDim.x, CUDA_MAX_GRID_SIZE);
state.SkipWithError(str.c_str());
return;
}
auto a = std::vector<T>(N);
auto b = std::vector<T>(N);
auto c = std::vector<T>(N);
std::fill(a.begin(), a.end(), 1);
std::fill(b.begin(), b.end(), 1);
std::fill(c.begin(), c.end(), 0);
T *d_a{nullptr}, *d_b{nullptr}, *d_c{nullptr};
if (PRINT_IF_ERROR(hipMalloc((void **) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector A");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector A");
return;
}
defer(hipFree(d_a));
if (PRINT_IF_ERROR(hipMalloc((void **) &d_b, b.size() * sizeof(*b.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector B");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector B");
return;
}
defer(hipFree(d_b));
if (PRINT_IF_ERROR(hipMalloc((void **) &d_c, c.size() * sizeof(*c.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector C");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector C");
return;
}
defer(hipFree(d_c));
if (PRINT_IF_ERROR(hipMemcpy(d_a, a.data(), a.size() * sizeof(*a.data()), hipMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector A");
return;
}
if (PRINT_IF_ERROR(hipMemcpy(d_b, b.data(), b.size() * sizeof(*b.data()), hipMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector B");
return;
}
if (PRINT_IF_ERROR(hipMemcpy(d_c, c.data(), c.size() * sizeof(*c.data()), hipMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector C");
return;
}
hipEvent_t start, stop;
PRINT_IF_ERROR(hipEventCreate(&start));
PRINT_IF_ERROR(hipEventCreate(&stop));
for (auto _ : state) {
hipEventRecord(start, NULL);
hipLaunchKernelGGL(( cuda_vector_add<T, COARSINING_FACTOR, BLOCK_SIZE>), dim3(gridDim), dim3(blockDim), 0, 0, d_a, d_b, d_c, N);
hipEventRecord(stop, NULL);
const auto cuda_err = hipEventSynchronize(stop);
state.PauseTiming();
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC failed to launch kernel");
break;
}
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC failed to get elapsed time");
break;
}
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"N", N},
{"BLOCK_SIZE", BLOCK_SIZE},
{"Flops", {1.0 * N, benchmark::Counter::kAvgThreadsRate}},
{"COARSINING_FACTOR", COARSINING_FACTOR}});
state.SetBytesProcessed(int64_t(state.iterations()) * N * sizeof(T));
state.SetItemsProcessed(int64_t(state.iterations()) * N);
}
#ifndef FAST_MODE
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 256)->ALL_ARGS()->UseManualTime();
#endif // FAST_MODE
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 512)->ALL_ARGS()->UseManualTime();
| ef47b05d2778c35a2e0ee45788b28233df64f2e9.cu |
#include <benchmark/benchmark.h>
#include <iostream>
#include <numeric>
#include <stdio.h>
#include <stdlib.h>
#include <vector>
#include <cuda_runtime.h>
#include "scope/init/init.hpp"
#include "scope/utils/utils.hpp"
#include "args.hpp"
template <typename T, int COARSINING_FACTOR = 1, int BLOCK_SIZE = 1>
__global__ void cuda_vector_add(T *in1, T *in2, T *out, size_t len) {
// todo: implement COARSINING_FACTOR
int index = threadIdx.x + blockIdx.x * BLOCK_SIZE;
if (index < len) {
out[index] = in1[index] + in2[index];
}
}
template <typename T, int COARSINING_FACTOR = 1, int BLOCK_SIZE = 128>
static void CUDA_VECTOR_ADD(benchmark::State &state) {
if (!has_cuda) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC no CUDA device found");
return;
}
const size_t N = state.range(0);
const dim3 blockDim(BLOCK_SIZE);
const dim3 gridDim(ceil(((float) N) / blockDim.x));
if (gridDim.x >= cuda_device_prop.maxGridSize[0]) {
const auto str = fmt::format("CUDA/VECTOR_ADD/BASIC the grid dimension {} exceeds the max grid dimensions {}",
gridDim.x, cuda_device_prop.maxGridSize[0]);
state.SkipWithError(str.c_str());
return;
}
if (gridDim.x >= CUDA_MAX_GRID_SIZE) {
const auto str = fmt::format("CUDA/VECTOR_ADD/BASIC the grid dimension {} exceeds the max grid dimensions {}",
gridDim.x, CUDA_MAX_GRID_SIZE);
state.SkipWithError(str.c_str());
return;
}
auto a = std::vector<T>(N);
auto b = std::vector<T>(N);
auto c = std::vector<T>(N);
std::fill(a.begin(), a.end(), 1);
std::fill(b.begin(), b.end(), 1);
std::fill(c.begin(), c.end(), 0);
T *d_a{nullptr}, *d_b{nullptr}, *d_c{nullptr};
if (PRINT_IF_ERROR(cudaMalloc((void **) &d_a, a.size() * sizeof(*a.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector A");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector A");
return;
}
defer(cudaFree(d_a));
if (PRINT_IF_ERROR(cudaMalloc((void **) &d_b, b.size() * sizeof(*b.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector B");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector B");
return;
}
defer(cudaFree(d_b));
if (PRINT_IF_ERROR(cudaMalloc((void **) &d_c, c.size() * sizeof(*c.data())))) {
LOG(critical, "CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector C");
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory allocation failed for vector C");
return;
}
defer(cudaFree(d_c));
if (PRINT_IF_ERROR(cudaMemcpy(d_a, a.data(), a.size() * sizeof(*a.data()), cudaMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector A");
return;
}
if (PRINT_IF_ERROR(cudaMemcpy(d_b, b.data(), b.size() * sizeof(*b.data()), cudaMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector B");
return;
}
if (PRINT_IF_ERROR(cudaMemcpy(d_c, c.data(), c.size() * sizeof(*c.data()), cudaMemcpyHostToDevice))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC device memory copy failed for vector C");
return;
}
cudaEvent_t start, stop;
PRINT_IF_ERROR(cudaEventCreate(&start));
PRINT_IF_ERROR(cudaEventCreate(&stop));
for (auto _ : state) {
cudaEventRecord(start, NULL);
cuda_vector_add<T, COARSINING_FACTOR, BLOCK_SIZE><<<gridDim, blockDim>>>(d_a, d_b, d_c, N);
cudaEventRecord(stop, NULL);
const auto cuda_err = cudaEventSynchronize(stop);
state.PauseTiming();
if (PRINT_IF_ERROR(cuda_err)) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC failed to launch kernel");
break;
}
float msecTotal = 0.0f;
if (PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop))) {
state.SkipWithError("CUDA/VECTOR_ADD/BASIC failed to get elapsed time");
break;
}
state.SetIterationTime(msecTotal / 1000);
state.ResumeTiming();
}
state.counters.insert({{"N", N},
{"BLOCK_SIZE", BLOCK_SIZE},
{"Flops", {1.0 * N, benchmark::Counter::kAvgThreadsRate}},
{"COARSINING_FACTOR", COARSINING_FACTOR}});
state.SetBytesProcessed(int64_t(state.iterations()) * N * sizeof(T));
state.SetItemsProcessed(int64_t(state.iterations()) * N);
}
#ifndef FAST_MODE
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 32)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 64)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 128)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 256)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 256)->ALL_ARGS()->UseManualTime();
#endif // FAST_MODE
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, char, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, int, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, float, 1, 512)->ALL_ARGS()->UseManualTime();
BENCHMARK_TEMPLATE(CUDA_VECTOR_ADD, double, 1, 512)->ALL_ARGS()->UseManualTime();
|
ceaca625e6647c53ebc89880c9c38d0fefb6cf47.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Select.cuh>
namespace faiss {
namespace gpu {
// Number of warps that the kernel is instantiated with
constexpr int kWarps = 8;
constexpr int kLanes = kWarpSize;
constexpr int kMaxDistance = std::numeric_limits<int>::max();
// Performs a binary matrix multiplication, returning the lowest k results in
// `vecs` for each `query` in terms of Hamming distance (a fused kernel)
// Each warp calculates distance for a single query
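// The query and database words are staged through shared-memory tiles; each thread
// accumulates the Hamming distance as __popc(query_word ^ vec_word) across its tile row.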
template <int NumWarpQ, int NumThreadQ, typename BinaryType>
__launch_bounds__(kWarps* kLanes) __global__ void binaryDistanceAnySize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<int, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
int,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int warpQuery = blockIdx.x * kWarps + warpId;
bool queryInBounds = warpQuery < query.getSize(0);
// Each warp loops through the entire chunk of vectors
for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// Reduction dimension
for (int blockK = 0; blockK < vecs.getSize(1); blockK += kLanes) {
int laneK = blockK + laneId;
bool kInBounds = laneK < vecs.getSize(1);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
int vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < kLanes; ++i) {
threadDistance +=
__popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
}
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
int id = valInBounds ? blockVec + laneId : -1;
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
// Version of the kernel that avoids a loop over the reduction dimension, and
// thus avoids reloading the query vectors
template <
int NumWarpQ,
int NumThreadQ,
typename BinaryType,
int ReductionLimit = kLanes>
__global__ void __launch_bounds__(kWarps* kLanes) binaryDistanceLimitSize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<int, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
int,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int laneK = laneId;
int warpQuery = blockIdx.x * kWarps + warpId;
bool kInBounds = laneK < vecs.getSize(1);
bool queryInBounds = warpQuery < query.getSize(0);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// Each warp loops through the entire chunk of vectors
for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
int vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < ReductionLimit; ++i) {
threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
int id = valInBounds ? blockVec + laneId : -1;
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
template <typename BinaryType>
void runBinaryDistanceAnySize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
hipStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
hipLaunchKernelGGL(( binaryDistanceAnySize<1, 1, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 32) {
hipLaunchKernelGGL(( binaryDistanceAnySize<32, 2, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 64) {
hipLaunchKernelGGL(( binaryDistanceAnySize<64, 3, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 128) {
hipLaunchKernelGGL(( binaryDistanceAnySize<128, 3, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 256) {
hipLaunchKernelGGL(( binaryDistanceAnySize<256, 4, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 512) {
hipLaunchKernelGGL(( binaryDistanceAnySize<512, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 1024) {
hipLaunchKernelGGL(( binaryDistanceAnySize<1024, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
hipLaunchKernelGGL(( binaryDistanceAnySize<2048, 8, BinaryType>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#endif
}
template <typename BinaryType, int ReductionLimit>
void runBinaryDistanceLimitSize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
hipStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<1, 1, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 32) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<32, 2, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 64) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<64, 3, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 128) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<128, 3, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 256) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<256, 4, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 512) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<512, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
} else if (k <= 1024) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<1024, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
hipLaunchKernelGGL(( binaryDistanceLimitSize<2048, 8, BinaryType, ReductionLimit>)
, dim3(grid), dim3(block), 0, stream, vecs, query, outK, outV, k);
}
#endif
}
void runBinaryDistance(
Tensor<unsigned char, 2, true>& vecs,
Tensor<unsigned char, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
hipStream_t stream) {
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(vecs.getSize(1) == query.getSize(1));
FAISS_ASSERT(outK.getSize(1) == k);
FAISS_ASSERT(outV.getSize(1) == k);
// For the optimized uint32 kernel, we handle 32 * 8 = 256 max dims
constexpr int kReductionLimit32 = 8;
// For the optimized uint8 kernel, we handle 8 * 16 = 128 max dims
constexpr int kReductionLimit8 = 16;
// All other cases (large or small) go through the general kernel
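// Dispatch summary (sizes are bytes of binary code per vector):
//   multiple of 4 bytes and <= 32 bytes (256 bits) -> uint32 limited-size kernel
//   <= 16 bytes (128 bits)                          -> uint8 limited-size kernel
//   anything else                                   -> general any-size uint8 kernel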
if (vecs.getSize(1) % sizeof(unsigned int) == 0 &&
(vecs.getSize(1) / sizeof(unsigned int)) <= kReductionLimit32) {
auto vecs32 = vecs.castResize<unsigned int>();
auto query32 = query.castResize<unsigned int>();
// Optimize for vectors with dimensions a multiple of 32 that are less
// than 32 * kReductionLimit (256) dimensions in size
runBinaryDistanceLimitSize<unsigned int, kReductionLimit32>(
vecs32, query32, outK, outV, k, stream);
} else if (vecs.getSize(1) <= kReductionLimit8) {
// Optimize for vectors of at most kReductionLimit8 (16) bytes, i.e. up to
// 8 * 16 = 128 dimensions in size
runBinaryDistanceLimitSize<unsigned char, kReductionLimit8>(
vecs, query, outK, outV, k, stream);
} else {
// Arbitrary size kernel
runBinaryDistanceAnySize<unsigned char>(
vecs, query, outK, outV, k, stream);
}
}
} // namespace gpu
} // namespace faiss
| ceaca625e6647c53ebc89880c9c38d0fefb6cf47.cu | /**
* Copyright (c) Facebook, Inc. and its affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
#include <faiss/gpu/utils/DeviceUtils.h>
#include <faiss/gpu/utils/DeviceDefs.cuh>
#include <faiss/gpu/utils/DeviceTensor.cuh>
#include <faiss/gpu/utils/Select.cuh>
namespace faiss {
namespace gpu {
// Number of warps that the kernel is instantiated with
constexpr int kWarps = 8;
constexpr int kLanes = kWarpSize;
constexpr int kMaxDistance = std::numeric_limits<int>::max();
// Performs a binary matrix multiplication, returning the lowest k results in
// `vecs` for each `query` in terms of Hamming distance (a fused kernel)
// Each warp calculates distance for a single query
template <int NumWarpQ, int NumThreadQ, typename BinaryType>
__launch_bounds__(kWarps* kLanes) __global__ void binaryDistanceAnySize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<int, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
int,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int warpQuery = blockIdx.x * kWarps + warpId;
bool queryInBounds = warpQuery < query.getSize(0);
// Each warp loops through the entire chunk of vectors
for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// Reduction dimension
for (int blockK = 0; blockK < vecs.getSize(1); blockK += kLanes) {
int laneK = blockK + laneId;
bool kInBounds = laneK < vecs.getSize(1);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
int vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < kLanes; ++i) {
threadDistance +=
__popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
}
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
int id = valInBounds ? blockVec + laneId : -1;
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
// Version of the kernel that avoids a loop over the reduction dimension, and
// thus avoids reloading the query vectors
template <
int NumWarpQ,
int NumThreadQ,
typename BinaryType,
int ReductionLimit = kLanes>
__global__ void __launch_bounds__(kWarps* kLanes) binaryDistanceLimitSize(
const Tensor<BinaryType, 2, true> vecs,
const Tensor<BinaryType, 2, true> query,
Tensor<int, 2, true> outK,
Tensor<int, 2, true> outV,
int k) {
// A matrix tile (query, k)
__shared__ BinaryType queryTile[kWarps][kLanes + 1]; // avoid bank conflict
// B matrix tile (vec, k)
__shared__ BinaryType vecTile[kLanes][kLanes + 1]; // avoid bank conflict
WarpSelect<
int,
int,
false,
Comparator<int>,
NumWarpQ,
NumThreadQ,
kWarps * kLanes>
heap(kMaxDistance, -1, k);
int warpId = threadIdx.y;
int laneId = threadIdx.x;
// Each warp handles a single query
int laneK = laneId;
int warpQuery = blockIdx.x * kWarps + warpId;
bool kInBounds = laneK < vecs.getSize(1);
bool queryInBounds = warpQuery < query.getSize(0);
queryTile[warpId][laneId] =
queryInBounds && kInBounds ? query[warpQuery][laneK] : 0;
// Each warp loops through the entire chunk of vectors
for (int blockVec = 0; blockVec < vecs.getSize(0); blockVec += kLanes) {
int threadDistance = 0;
// kWarps warps are responsible for loading 32 vecs
#pragma unroll
for (int i = 0; i < kLanes / kWarps; ++i) {
int warpVec = i * kWarps + warpId;
int vec = blockVec + warpVec;
bool vecInBounds = vec < vecs.getSize(0);
vecTile[warpVec][laneId] =
vecInBounds && kInBounds ? vecs[vec][laneK] : 0;
}
__syncthreads();
// Compare distances
#pragma unroll
for (int i = 0; i < ReductionLimit; ++i) {
threadDistance += __popc(queryTile[warpId][i] ^ vecTile[laneId][i]);
}
__syncthreads();
// Lanes within a warp are different vec results against the same query
// Only submit distances which represent real (query, vec) pairs
bool valInBounds =
queryInBounds && (blockVec + laneId < vecs.getSize(0));
threadDistance = valInBounds ? threadDistance : kMaxDistance;
int id = valInBounds ? blockVec + laneId : -1;
heap.add(threadDistance, id);
}
heap.reduce();
if (warpQuery < query.getSize(0)) {
heap.writeOut(outK[warpQuery].data(), outV[warpQuery].data(), k);
}
}
template <typename BinaryType>
void runBinaryDistanceAnySize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
cudaStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
binaryDistanceAnySize<1, 1, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 32) {
binaryDistanceAnySize<32, 2, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 64) {
binaryDistanceAnySize<64, 3, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 128) {
binaryDistanceAnySize<128, 3, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 256) {
binaryDistanceAnySize<256, 4, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 512) {
binaryDistanceAnySize<512, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 1024) {
binaryDistanceAnySize<1024, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
binaryDistanceAnySize<2048, 8, BinaryType>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#endif
}
template <typename BinaryType, int ReductionLimit>
void runBinaryDistanceLimitSize(
Tensor<BinaryType, 2, true>& vecs,
Tensor<BinaryType, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
cudaStream_t stream) {
dim3 grid(utils::divUp(query.getSize(0), kWarps));
dim3 block(kLanes, kWarps);
if (k == 1) {
binaryDistanceLimitSize<1, 1, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 32) {
binaryDistanceLimitSize<32, 2, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 64) {
binaryDistanceLimitSize<64, 3, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 128) {
binaryDistanceLimitSize<128, 3, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 256) {
binaryDistanceLimitSize<256, 4, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 512) {
binaryDistanceLimitSize<512, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
} else if (k <= 1024) {
binaryDistanceLimitSize<1024, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#if GPU_MAX_SELECTION_K >= 2048
else if (k <= 2048) {
binaryDistanceLimitSize<2048, 8, BinaryType, ReductionLimit>
<<<grid, block, 0, stream>>>(vecs, query, outK, outV, k);
}
#endif
}
void runBinaryDistance(
Tensor<unsigned char, 2, true>& vecs,
Tensor<unsigned char, 2, true>& query,
Tensor<int, 2, true>& outK,
Tensor<int, 2, true>& outV,
int k,
cudaStream_t stream) {
FAISS_ASSERT(k <= GPU_MAX_SELECTION_K);
FAISS_ASSERT(vecs.getSize(1) == query.getSize(1));
FAISS_ASSERT(outK.getSize(1) == k);
FAISS_ASSERT(outV.getSize(1) == k);
// For the optimized uint32 kernel, we handle 32 * 8 = 256 max dims
constexpr int kReductionLimit32 = 8;
// For the optimized uint8 kernel, we handle 8 * 16 = 128 max dims
constexpr int kReductionLimit8 = 16;
// All other cases (large or small) go through the general kernel
if (vecs.getSize(1) % sizeof(unsigned int) == 0 &&
(vecs.getSize(1) / sizeof(unsigned int)) <= kReductionLimit32) {
auto vecs32 = vecs.castResize<unsigned int>();
auto query32 = query.castResize<unsigned int>();
// Optimize for vectors with dimensions a multiple of 32 that are less
// than 32 * kReductionLimit (256) dimensions in size
runBinaryDistanceLimitSize<unsigned int, kReductionLimit32>(
vecs32, query32, outK, outV, k, stream);
} else if (vecs.getSize(1) <= kReductionLimit8) {
// Optimize for vectors of at most kReductionLimit8 (16) bytes, i.e. up to
// 8 * 16 = 128 dimensions in size
runBinaryDistanceLimitSize<unsigned char, kReductionLimit8>(
vecs, query, outK, outV, k, stream);
} else {
// Arbitrary size kernel
runBinaryDistanceAnySize<unsigned char>(
vecs, query, outK, outV, k, stream);
}
}
} // namespace gpu
} // namespace faiss
|
17831803ceafba11f486a7a69d33f08be0402cc2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//pass
//--gridDim=512 --blockDim=256
//REQUIRES: const array as formal (imperial edit)
#define min(x,y) (x < y ? x : y)
#ifndef DOUBLE_PRECISION
typedef float real;
#else
typedef double real;
#endif
#
//Number of time steps
#define NUM_STEPS 2048
//Max option batch size
#define MAX_OPTIONS 1024
#define TIME_STEPS 16
#define CACHE_DELTA (2 * TIME_STEPS)
#define CACHE_SIZE (256)
#define CACHE_STEP (CACHE_SIZE - CACHE_DELTA)
#if NUM_STEPS % CACHE_DELTA
#error Bad constants
#endif
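// Tiling scheme: the binomial tree is walked down CACHE_DELTA (= 2 * TIME_STEPS = 32)
// time steps per outer iteration; node values pass through shared-memory windows of
// CACHE_SIZE (256) entries that overlap by CACHE_DELTA, so the trailing CACHE_DELTA
// entries of each window act as a read-only apron whose updated values come from the
// next window.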
//Preprocessed input option data
typedef struct
{
real S;
real X;
real vDt;
real puByDf;
real pdByDf;
} __TOptionData;
#if 0 // imperial edit
static __constant__ __TOptionData d_OptionData[MAX_OPTIONS];
static __device__ float d_CallValue[MAX_OPTIONS];
static __device__ real d_CallBuffer[MAX_OPTIONS * (NUM_STEPS + 16)];
#endif
////////////////////////////////////////////////////////////////////////////////
// Overloaded shortcut functions for different precision modes
////////////////////////////////////////////////////////////////////////////////
#ifndef DOUBLE_PRECISION
__device__ static __attribute__((always_inline)) float expiryCallValue(float S, float X, float vDt, int i)
{
real d = S * expf(vDt * (2.0f * i - NUM_STEPS)) - X;
return (d > 0) ? d : 0;
}
#else
__device__ static __attribute__((always_inline)) double expiryCallValue(double S, double X, double vDt, int i)
{
double d = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
return (d > 0) ? d : 0;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// GPU kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void binomialOptionsKernel(
__TOptionData *d_OptionData, // imperial edit
float *d_CallValue, // imperial edit
real *d_CallBuffer // imperial edit
) {
__shared__ real callA[CACHE_SIZE+1];
__shared__ real callB[CACHE_SIZE+1];
//Global memory frame for current option (thread block)
real *const d_Call = &d_CallBuffer[blockIdx.x * (NUM_STEPS + 16)];
const int tid = threadIdx.x;
const real S = d_OptionData[blockIdx.x].S;
const real X = d_OptionData[blockIdx.x].X;
const real vDt = d_OptionData[blockIdx.x].vDt;
const real puByDf = d_OptionData[blockIdx.x].puByDf;
const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
//Compute values at expiry date
for (int i = tid;
__global_invariant((unsigned)i % CACHE_SIZE == tid),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
i <= NUM_STEPS; i += CACHE_SIZE)
{
d_Call[i] = expiryCallValue(S, X, vDt, i);
}
//#if 0
//Walk down binomial tree
//So double-buffer and synchronize to avoid read-after-write hazards.
for (int i = NUM_STEPS;
__global_invariant(i <= NUM_STEPS),
__global_invariant(__read_implies(d_CallBuffer, (__read_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
i > 0; i -= CACHE_DELTA)
for (int c_base = 0;
__global_invariant(__read_implies(d_CallBuffer, (__read_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
c_base < i; c_base += CACHE_STEP)
{
//Start and end positions within shared memory cache
int c_start = min(CACHE_SIZE - 1, i - c_base);
int c_end = c_start - CACHE_DELTA;
//Read data(with apron) to shared memory
__syncthreads();
if (tid <= c_start)
{
callA[tid] = d_Call[c_base + tid];
}
//Calculations within shared memory
for (int k = c_start - 1; k >= c_end;)
{
//Compute discounted expected value
__syncthreads();
callB[tid] = puByDf * callA[tid + 1] + pdByDf * callA[tid];
k--;
//Compute discounted expected value
__syncthreads();
callA[tid] = puByDf * callB[tid + 1] + pdByDf * callB[tid];
k--;
}
//Flush shared memory cache
__syncthreads();
if (tid <= c_end)
{
d_Call[c_base + tid] = callA[tid];
}
}
//Write the value at the top of the tree to destination buffer
if (threadIdx.x == 0)
{
d_CallValue[blockIdx.x] = (float)callA[0];
}
//#endif
}
| 17831803ceafba11f486a7a69d33f08be0402cc2.cu | //pass
//--gridDim=512 --blockDim=256
//REQUIRES: const array as formal (imperial edit)
#define min(x,y) (x < y ? x : y)
#ifndef DOUBLE_PRECISION
typedef float real;
#else
typedef double real;
#endif
#
//Number of time steps
#define NUM_STEPS 2048
//Max option batch size
#define MAX_OPTIONS 1024
#define TIME_STEPS 16
#define CACHE_DELTA (2 * TIME_STEPS)
#define CACHE_SIZE (256)
#define CACHE_STEP (CACHE_SIZE - CACHE_DELTA)
#if NUM_STEPS % CACHE_DELTA
#error Bad constants
#endif
//Preprocessed input option data
typedef struct
{
real S;
real X;
real vDt;
real puByDf;
real pdByDf;
} __TOptionData;
#if 0 // imperial edit
static __constant__ __TOptionData d_OptionData[MAX_OPTIONS];
static __device__ float d_CallValue[MAX_OPTIONS];
static __device__ real d_CallBuffer[MAX_OPTIONS * (NUM_STEPS + 16)];
#endif
////////////////////////////////////////////////////////////////////////////////
// Overloaded shortcut functions for different precision modes
////////////////////////////////////////////////////////////////////////////////
#ifndef DOUBLE_PRECISION
__device__ static __attribute__((always_inline)) float expiryCallValue(float S, float X, float vDt, int i)
{
real d = S * expf(vDt * (2.0f * i - NUM_STEPS)) - X;
return (d > 0) ? d : 0;
}
#else
__device__ static __attribute__((always_inline)) double expiryCallValue(double S, double X, double vDt, int i)
{
double d = S * exp(vDt * (2.0 * i - NUM_STEPS)) - X;
return (d > 0) ? d : 0;
}
#endif
////////////////////////////////////////////////////////////////////////////////
// GPU kernel
////////////////////////////////////////////////////////////////////////////////
__global__ void binomialOptionsKernel(
__TOptionData *d_OptionData, // imperial edit
float *d_CallValue, // imperial edit
real *d_CallBuffer // imperial edit
) {
__shared__ real callA[CACHE_SIZE+1];
__shared__ real callB[CACHE_SIZE+1];
//Global memory frame for current option (thread block)
real *const d_Call = &d_CallBuffer[blockIdx.x * (NUM_STEPS + 16)];
const int tid = threadIdx.x;
const real S = d_OptionData[blockIdx.x].S;
const real X = d_OptionData[blockIdx.x].X;
const real vDt = d_OptionData[blockIdx.x].vDt;
const real puByDf = d_OptionData[blockIdx.x].puByDf;
const real pdByDf = d_OptionData[blockIdx.x].pdByDf;
//Compute values at expiry date
for (int i = tid;
__global_invariant((unsigned)i % CACHE_SIZE == tid),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
i <= NUM_STEPS; i += CACHE_SIZE)
{
d_Call[i] = expiryCallValue(S, X, vDt, i);
}
//#if 0
//Walk down binomial tree
//So double-buffer and synchronize to avoid read-after-write hazards.
for (int i = NUM_STEPS;
__global_invariant(i <= NUM_STEPS),
__global_invariant(__read_implies(d_CallBuffer, (__read_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
i > 0; i -= CACHE_DELTA)
for (int c_base = 0;
__global_invariant(__read_implies(d_CallBuffer, (__read_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
__global_invariant(__write_implies(d_CallBuffer, (__write_offset_bytes(d_CallBuffer)/sizeof(real))/(NUM_STEPS + 16) == blockIdx.x)),
c_base < i; c_base += CACHE_STEP)
{
//Start and end positions within shared memory cache
int c_start = min(CACHE_SIZE - 1, i - c_base);
int c_end = c_start - CACHE_DELTA;
//Read data(with apron) to shared memory
__syncthreads();
if (tid <= c_start)
{
callA[tid] = d_Call[c_base + tid];
}
//Calculations within shared memory
for (int k = c_start - 1; k >= c_end;)
{
//Compute discounted expected value
__syncthreads();
callB[tid] = puByDf * callA[tid + 1] + pdByDf * callA[tid];
k--;
//Compute discounted expected value
__syncthreads();
callA[tid] = puByDf * callB[tid + 1] + pdByDf * callB[tid];
k--;
}
//Flush shared memory cache
__syncthreads();
if (tid <= c_end)
{
d_Call[c_base + tid] = callA[tid];
}
}
//Write the value at the top of the tree to destination buffer
if (threadIdx.x == 0)
{
d_CallValue[blockIdx.x] = (float)callA[0];
}
//#endif
}
|
72c70db919c7373273a5471452ba9e43ee6ddf34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "kernel_scanNaiveSumHirizontal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *_d_out_integralImage = NULL;
hipMalloc(&_d_out_integralImage, XSIZE*YSIZE);
int _h_width = XSIZE;
int _h_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
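// Warm-up phase: one synchronized launch (plus hipFree(0) to force context creation)
// followed by ten untimed launches; the next 1000 launches are timed with steady_clock
// and reported in microseconds. No device synchronization follows the timed loop, so
// the measurement largely reflects kernel enqueue time.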
hipFree(0);hipLaunchKernelGGL((
kernel_scanNaiveSumHirizontal), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_out_integralImage,_h_width,_h_height);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
kernel_scanNaiveSumHirizontal), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_out_integralImage,_h_width,_h_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
kernel_scanNaiveSumHirizontal), dim3(gridBlock),dim3(threadBlock), 0, 0, _d_out_integralImage,_h_width,_h_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 72c70db919c7373273a5471452ba9e43ee6ddf34.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "kernel_scanNaiveSumHirizontal.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
unsigned int *_d_out_integralImage = NULL;
cudaMalloc(&_d_out_integralImage, XSIZE*YSIZE*sizeof(unsigned int)); // size in bytes, assuming one unsigned int per pixel
int _h_width = XSIZE;
int _h_height = YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
kernel_scanNaiveSumHirizontal<<<gridBlock,threadBlock>>>(_d_out_integralImage,_h_width,_h_height);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
kernel_scanNaiveSumHirizontal<<<gridBlock,threadBlock>>>(_d_out_integralImage,_h_width,_h_height);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
kernel_scanNaiveSumHirizontal<<<gridBlock,threadBlock>>>(_d_out_integralImage,_h_width,_h_height);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
9af13d8a8a9d5416a7addbc08353df9df8883751.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_ohem_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
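// Clamping to FLT_MIN keeps log() away from 0, so a vanishing predicted
// probability yields a large but finite loss instead of inf.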
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossOHEMLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data,
top[2]->mutable_gpu_data());
}
// Fix a bug that happens when propagate_down[0] = false in backward
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
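// Softmax-with-cross-entropy gradient: dL/dz_c = p_c - 1{c == label}. The
// caller copies the probabilities into bottom_diff before launching this
// kernel, so only the entry of the true class needs the additional -1.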
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossOHEMLayer);
} // namespace caffe
| 9af13d8a8a9d5416a7addbc08353df9df8883751.cu | /*
All modification made by Intel Corporation: © 2016 Intel Corporation
All contributions by the University of California:
Copyright (c) 2014, 2015, The Regents of the University of California (Regents)
All rights reserved.
All other contributions:
Copyright (c) 2014, 2015, the respective contributors
All rights reserved.
For the list of contributors go to https://github.com/BVLC/caffe/blob/master/CONTRIBUTORS.md
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <algorithm>
#include <cfloat>
#include <vector>
#include "caffe/layers/softmax_loss_ohem_layer.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxLossForwardGPU(const int nthreads,
const Dtype* prob_data, const Dtype* label, Dtype* loss,
const int num, const int dim, const int spatial_dim,
const bool has_ignore_label_, const int ignore_label_,
Dtype* counts) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
loss[index] = 0;
counts[index] = 0;
} else {
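// Clamping to FLT_MIN keeps log() away from 0, so a vanishing predicted
// probability yields a large but finite loss instead of inf.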
loss[index] = -log(max(prob_data[n * dim + label_value * spatial_dim + s],
Dtype(FLT_MIN)));
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossOHEMLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_);
const Dtype* prob_data = prob_.gpu_data();
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is not used for anything until it is overwritten
// on the backward pass, we use it here to avoid having to allocate new GPU
// memory to accumulate intermediate results in the kernel.
Dtype* loss_data = bottom[0]->mutable_gpu_diff();
// Similarly, this memory is never used elsewhere, and thus we can use it
// to avoid having to allocate additional GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype loss;
caffe_gpu_asum(nthreads, loss_data, &loss);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_,
valid_count);
if (top.size() >= 2) {
top[1]->ShareData(prob_);
}
if (top.size() >= 3) {
// Output per-instance loss
caffe_gpu_memcpy(top[2]->count() * sizeof(Dtype), loss_data,
top[2]->mutable_gpu_data());
}
// Fix a bug that happens when propagate_down[0] = false in backward
caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff());
}
template <typename Dtype>
__global__ void SoftmaxLossBackwardGPU(const int nthreads, const Dtype* top,
const Dtype* label, Dtype* bottom_diff, const int num, const int dim,
const int spatial_dim, const bool has_ignore_label_,
const int ignore_label_, Dtype* counts) {
const int channels = dim / spatial_dim;
CUDA_KERNEL_LOOP(index, nthreads) {
const int n = index / spatial_dim;
const int s = index % spatial_dim;
const int label_value = static_cast<int>(label[n * spatial_dim + s]);
if (has_ignore_label_ && label_value == ignore_label_) {
for (int c = 0; c < channels; ++c) {
bottom_diff[n * dim + c * spatial_dim + s] = 0;
}
counts[index] = 0;
} else {
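// Softmax-with-cross-entropy gradient: dL/dz_c = p_c - 1{c == label}. The
// caller copies the probabilities into bottom_diff before launching this
// kernel, so only the entry of the true class needs the additional -1.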
bottom_diff[n * dim + label_value * spatial_dim + s] -= 1;
counts[index] = 1;
}
}
}
template <typename Dtype>
void SoftmaxWithLossOHEMLayer<Dtype>::Backward_gpu(
const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (propagate_down[1]) {
LOG(FATAL) << this->type()
<< " Layer cannot backpropagate to label inputs.";
}
if (propagate_down[0]) {
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const Dtype* prob_data = prob_.gpu_data();
const Dtype* top_data = top[0]->gpu_data();
caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff);
const Dtype* label = bottom[1]->gpu_data();
const int dim = prob_.count() / outer_num_;
const int nthreads = outer_num_ * inner_num_;
// Since this memory is never used for anything else,
// we use it to avoid allocating new GPU memory.
Dtype* counts = prob_.mutable_gpu_diff();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads),
CAFFE_CUDA_NUM_THREADS>>>(nthreads, top_data, label, bottom_diff,
outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts);
Dtype valid_count = -1;
// Only launch another CUDA kernel if we actually need the count of valid
// outputs.
if (normalization_ == LossParameter_NormalizationMode_VALID &&
has_ignore_label_) {
caffe_gpu_asum(nthreads, counts, &valid_count);
}
const Dtype loss_weight = top[0]->cpu_diff()[0] /
get_normalizer(normalization_, valid_count);
caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff);
}
}
INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxWithLossOHEMLayer);
} // namespace caffe
|
64c29b7d67e7502cd034738e7446a60201f3e5e3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "sepia.h"
#include <math.h>
#include <malloc.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <plugin.h>
#include <string.h>
extern "C" void __init_sepia_cuda(int* result, plugin_action_info_t *init_data) {
if(init_data == NULL || result == NULL)
{
*result = 1;
return;
}
init_data->name = (char*)malloc(7*sizeof(char)); // room for "-scuda" plus the terminating NUL
strcpy(init_data->name, "-scuda");
init_data->func_ptr = (void*)&sepia;
init_data->instruction = (char*)"sepia effect based on CUDA";
init_data->argc = 0;
init_data->type = FUNC_TRANSFORM;
*result = 0;
}
__global__ void cuda_func(pixel_t* pixels, size_t n) {
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= n) return; // the launch rounds the thread count up to a multiple of 256, so guard the tail
static const float c[3][3] = {
{ .393f, .769f, .189f },
{ .349f, .686f, .168f },
{ .272f, .543f, .131f } };
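// Widely used sepia-tone weights: each output channel is a weighted sum of
// the input RGB values, clamped to 255 below.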
struct pixel_t old = pixels[i];
int r = old.r * c[0][0] + old.g * c[0][1] + old.b * c[0][2];
int g = old.r * c[1][0] + old.g * c[1][1] + old.b * c[1][2];
int b = old.r * c[2][0] + old.g * c[2][1] + old.b * c[2][2];
pixels[i].r = (unsigned char)min(255, r);
pixels[i].g = (unsigned char)min(255, g);
pixels[i].b = (unsigned char)min(255, b);
}
/*static struct pixel_t setPixel(struct image_t* const image, int x, int y) {
static const float c[3][3] = {
{ .393f, .769f, .189f },
{ .349f, .686f, .168f },
{ .272f, .543f, .131f } };
struct pixel_t const old = image->pixels[y * (image->width) + x];
struct pixel_t pixel;
pixel.r = sat(old.r * c[0][0] + old.g * c[0][1] + old.b * c[0][2]);
pixel.g = sat(old.r * c[1][0] + old.g * c[1][1] + old.b * c[1][2]);
pixel.b = sat(old.r * c[2][0] + old.g * c[2][1] + old.b * c[2][2]);
return pixel;
}*/
bmp_transform_error_code_t
sepia(struct image_t* const src, struct image_t* const result, char** argv) {
size_t N = src->width*src->height;
clock_t begin, end;
double time_spent;
struct pixel_t* dev_pixels;
result->width = src->width;
result->height = src->height;
result->pixels = (struct pixel_t*)malloc(sizeof(struct pixel_t) * N);
begin = clock();
hipMalloc(&dev_pixels, N*sizeof(struct pixel_t));
hipMemcpy(dev_pixels, src->pixels, N*sizeof(struct pixel_t), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cuda_func), dim3((N+255) / 256), dim3(256), 0, 0, dev_pixels, N);
hipMemcpy(result->pixels, dev_pixels, N*sizeof(struct pixel_t), hipMemcpyDeviceToHost);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("CUDA sepia time: %f\n", time_spent);
return TRANSFORM_OK;
}
| 64c29b7d67e7502cd034738e7446a60201f3e5e3.cu | #include "sepia.h"
#include <math.h>
#include <malloc.h>
#include <stdlib.h>
#include <time.h>
#include <stdio.h>
#include <plugin.h>
#include <string.h>
extern "C" void __init_sepia_cuda(int* result, plugin_action_info_t *init_data) {
if(init_data == NULL || result == NULL)
{
*result = 1;
return;
}
init_data->name = (char*)malloc(7*sizeof(char)); // room for "-scuda" plus the terminating NUL
strcpy(init_data->name, "-scuda");
init_data->func_ptr = (void*)&sepia;
init_data->instruction = (char*)"sepia effect based on CUDA";
init_data->argc = 0;
init_data->type = FUNC_TRANSFORM;
*result = 0;
}
__global__ void cuda_func(pixel_t* pixels, size_t n) {
size_t i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= n) return; // the launch rounds the thread count up to a multiple of 256, so guard the tail
static const float c[3][3] = {
{ .393f, .769f, .189f },
{ .349f, .686f, .168f },
{ .272f, .543f, .131f } };
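// Widely used sepia-tone weights: each output channel is a weighted sum of
// the input RGB values, clamped to 255 below.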
struct pixel_t old = pixels[i];
int r = old.r * c[0][0] + old.g * c[0][1] + old.b * c[0][2];
int g = old.r * c[1][0] + old.g * c[1][1] + old.b * c[1][2];
int b = old.r * c[2][0] + old.g * c[2][1] + old.b * c[2][2];
pixels[i].r = (unsigned char)min(255, r);
pixels[i].g = (unsigned char)min(255, g);
pixels[i].b = (unsigned char)min(255, b);
}
/*static struct pixel_t setPixel(struct image_t* const image, int x, int y) {
static const float c[3][3] = {
{ .393f, .769f, .189f },
{ .349f, .686f, .168f },
{ .272f, .543f, .131f } };
struct pixel_t const old = image->pixels[y * (image->width) + x];
struct pixel_t pixel;
pixel.r = sat(old.r * c[0][0] + old.g * c[0][1] + old.b * c[0][2]);
pixel.g = sat(old.r * c[1][0] + old.g * c[1][1] + old.b * c[1][2]);
pixel.b = sat(old.r * c[2][0] + old.g * c[2][1] + old.b * c[2][2]);
return pixel;
}*/
bmp_transform_error_code_t
sepia(struct image_t* const src, struct image_t* const result, char** argv) {
size_t N = src->width*src->height;
clock_t begin, end;
double time_spent;
struct pixel_t* dev_pixels;
result->width = src->width;
result->height = src->height;
result->pixels = (struct pixel_t*)malloc(sizeof(struct pixel_t) * N);
begin = clock();
cudaMalloc(&dev_pixels, N*sizeof(struct pixel_t));
cudaMemcpy(dev_pixels, src->pixels, N*sizeof(struct pixel_t), cudaMemcpyHostToDevice);
cuda_func<<<(N+255) / 256, 256>>>(dev_pixels, N);
cudaMemcpy(result->pixels, dev_pixels, N*sizeof(struct pixel_t), cudaMemcpyDeviceToHost);
end = clock();
time_spent = (double)(end - begin) / CLOCKS_PER_SEC;
printf("CUDA sepia time: %f\n", time_spent);
return TRANSFORM_OK;
}
|
be3744f6f33c31ce68e35a7cb4e5c5ab586713f3.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(0xFFFFFFFF, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
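// The warp shuffle moves 32-bit registers, so the 64-bit value is split into
// two halves, each half is shuffled independently, and the pair is reassembled.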
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val));
hi = __kmpc_impl_shfl_down_sync(0xFFFFFFFF, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(0xFFFFFFFF, lo, delta, size);
asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi));
return val;
}
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t lanemask_lt;
uint32_t lanemask_gt;
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt));
uint32_t Liveness = __ACTIVEMASK();
uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2;
asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt));
do {
Liveness = __ACTIVEMASK();
remote_id = __ffs(Liveness & lanemask_gt);
size = __popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
__device__ static volatile uint32_t IterCnt = 0;
__device__ static volatile uint32_t Cnt = 0;
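// Coordination state for the buffered teams reduction below: IterCnt is the
// lowest team id of the chunk of teams currently allowed to write into the
// global buffer, and Cnt counts how many teams of that chunk have already
// deposited their partial result.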
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ unsigned Bound;
__shared__ unsigned ChunkTeamCount;
// Block progress for teams greater than the current upper
// limit. We always only allow a number of teams less or equal
// to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
int ModBockId = TeamId % num_of_records;
if (TeamId < num_of_records)
lgcpyFct(global_buffer, ModBockId, reduce_data);
else
lgredFct(global_buffer, ModBockId, reduce_data);
__threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
| be3744f6f33c31ce68e35a7cb4e5c5ab586713f3.cu | //===---- reduction.cu - NVPTX OpenMP reduction implementation ---- CUDA
//-*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of reduction with KMPC interface.
//
//===----------------------------------------------------------------------===//
#include <complex.h>
#include <stdio.h>
#include "omptarget-nvptx.h"
#include "target_impl.h"
EXTERN
void __kmpc_nvptx_end_reduce(int32_t global_tid) {}
EXTERN
void __kmpc_nvptx_end_reduce_nowait(int32_t global_tid) {}
EXTERN int32_t __kmpc_shuffle_int32(int32_t val, int16_t delta, int16_t size) {
return __kmpc_impl_shfl_down_sync(0xFFFFFFFF, val, delta, size);
}
EXTERN int64_t __kmpc_shuffle_int64(int64_t val, int16_t delta, int16_t size) {
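// The warp shuffle moves 32-bit registers, so the 64-bit value is split into
// two halves, each half is shuffled independently, and the pair is reassembled.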
int lo, hi;
asm volatile("mov.b64 {%0,%1}, %2;" : "=r"(lo), "=r"(hi) : "l"(val));
hi = __kmpc_impl_shfl_down_sync(0xFFFFFFFF, hi, delta, size);
lo = __kmpc_impl_shfl_down_sync(0xFFFFFFFF, lo, delta, size);
asm volatile("mov.b64 %0, {%1,%2};" : "=l"(val) : "r"(lo), "r"(hi));
return val;
}
INLINE static void gpu_regular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct) {
for (uint32_t mask = WARPSIZE / 2; mask > 0; mask /= 2) {
shflFct(reduce_data, /*LaneId - not used= */ 0,
/*Offset = */ mask, /*AlgoVersion=*/0);
}
}
INLINE static void gpu_irregular_warp_reduce(void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
uint32_t size, uint32_t tid) {
uint32_t curr_size;
uint32_t mask;
curr_size = size;
mask = curr_size / 2;
while (mask > 0) {
shflFct(reduce_data, /*LaneId = */ tid, /*Offset=*/mask, /*AlgoVersion=*/1);
curr_size = (curr_size + 1) / 2;
mask = curr_size / 2;
}
}
INLINE static uint32_t
gpu_irregular_simd_reduce(void *reduce_data, kmp_ShuffleReductFctPtr shflFct) {
uint32_t lanemask_lt;
uint32_t lanemask_gt;
uint32_t size, remote_id, physical_lane_id;
physical_lane_id = GetThreadIdInBlock() % WARPSIZE;
asm("mov.u32 %0, %%lanemask_lt;" : "=r"(lanemask_lt));
uint32_t Liveness = __ACTIVEMASK();
uint32_t logical_lane_id = __popc(Liveness & lanemask_lt) * 2;
asm("mov.u32 %0, %%lanemask_gt;" : "=r"(lanemask_gt));
do {
Liveness = __ACTIVEMASK();
remote_id = __ffs(Liveness & lanemask_gt);
size = __popc(Liveness);
logical_lane_id /= 2;
shflFct(reduce_data, /*LaneId =*/logical_lane_id,
/*Offset=*/remote_id - 1 - physical_lane_id, /*AlgoVersion=*/2);
} while (logical_lane_id % 2 == 0 && size > 1);
return (logical_lane_id == 0);
}
EXTERN
int32_t __kmpc_nvptx_simd_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) {
gpu_regular_warp_reduce(reduce_data, shflFct);
return GetThreadIdInBlock() % WARPSIZE ==
0; // Result on lane 0 of the simd warp.
} else {
return gpu_irregular_simd_reduce(
reduce_data, shflFct); // Result on the first active lane.
}
}
INLINE
static int32_t nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
bool isSPMDExecutionMode, bool isRuntimeUninitialized) {
uint32_t BlockThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
uint32_t NumThreads = GetNumberOfOmpThreads(isSPMDExecutionMode);
if (NumThreads == 1)
return 1;
/*
* This reduce function handles reduction within a team. It handles
* parallel regions in both L1 and L2 parallelism levels. It also
* supports Generic, SPMD, and NoOMP modes.
*
* 1. Reduce within a warp.
* 2. Warp master copies value to warp 0 via shared memory.
* 3. Warp 0 reduces to a single value.
* 4. The reduced value is available in the thread that returns 1.
*/
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = BlockThreadId / WARPSIZE;
// Volta execution model:
// For the Generic execution mode a parallel region either has 1 thread and
// beyond that, always a multiple of 32. For the SPMD execution mode we may
// have any number of threads.
if ((NumThreads % WARPSIZE == 0) || (WarpId < WarpsNeeded - 1))
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (NumThreads > 1) // Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/NumThreads % WARPSIZE,
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
}
return BlockThreadId == 0;
#else
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (!(Liveness & (Liveness + 1))) // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/GetThreadIdInBlock() % WARPSIZE);
else if (!isRuntimeUninitialized) // Dispersed lanes. Only threads in L2
// parallel region may enter here; return
// early.
return gpu_irregular_simd_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
//
// Only L1 parallel region can enter this if condition.
if (NumThreads > WARPSIZE) {
uint32_t WarpsNeeded = (NumThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = BlockThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
BlockThreadId);
return BlockThreadId == 0;
} else if (isRuntimeUninitialized /* Never an L2 parallel region without the OMP runtime */) {
return BlockThreadId == 0;
}
// Get the OMP thread Id. This is different from BlockThreadId in the case of
// an L2 parallel region.
return global_tid == 0;
#endif // __CUDA_ARCH__ >= 700
}
EXTERN __attribute__((deprecated)) int32_t __kmpc_nvptx_parallel_reduce_nowait(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct,
isSPMDMode(), isRuntimeUninitialized());
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, int32_t num_vars, size_t reduce_size,
void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
checkSPMDMode(loc), checkRuntimeUninitialized(loc));
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/true, /*isRuntimeUninitialized=*/true);
}
EXTERN
int32_t __kmpc_nvptx_parallel_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct) {
return nvptx_parallel_reduce_nowait(
global_tid, num_vars, reduce_size, reduce_data, shflFct, cpyFct,
/*isSPMDExecutionMode=*/false, /*isRuntimeUninitialized=*/true);
}
INLINE
static int32_t nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct,
bool isSPMDExecutionMode) {
uint32_t ThreadId = GetLogicalThreadIdInBlock(isSPMDExecutionMode);
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
isSPMDExecutionMode ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ volatile bool IsLastTeam;
// Team masters of all teams write to the scratchpad.
if (ThreadId == 0) {
unsigned int *timestamp = GetTeamsReductionTimestamp();
char *scratchpad = GetTeamsReductionScratchpad();
scratchFct(reduce_data, scratchpad, TeamId, NumTeams);
__threadfence();
// atomicInc increments 'timestamp' and has a range [0, NumTeams-1].
// It resets 'timestamp' back to 0 once the last team increments
// this counter.
unsigned val = atomicInc(timestamp, NumTeams - 1);
IsLastTeam = val == NumTeams - 1;
}
// We have to wait on L1 barrier because in GENERIC mode the workers
// are waiting on barrier 0 for work.
//
// If we guard this barrier as follows it leads to deadlock, probably
// because of a compiler bug: if (!IsGenericMode()) __syncthreads();
uint16_t SyncWarps = (NumThreads + WARPSIZE - 1) / WARPSIZE;
named_sync(L1_BARRIER, SyncWarps * WARPSIZE);
// If this team is not the last, quit.
if (/* Volatile read by all threads */ !IsLastTeam)
return 0;
//
// Last team processing.
//
// Threads in excess of #teams do not participate in reduction of the
// scratchpad values.
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
uint32_t ActiveThreads = NumThreads;
if (NumTeams < NumThreads) {
ActiveThreads =
(NumTeams < WARPSIZE) ? 1 : NumTeams & ~((uint16_t)WARPSIZE - 1);
}
if (ThreadId >= ActiveThreads)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = ActiveThreads + ThreadId; i < NumTeams; i += ActiveThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
uint32_t WarpId = ThreadId / WARPSIZE;
// Reduce across warps to the warp master.
if ((ActiveThreads % WARPSIZE == 0) ||
(WarpId < WarpsNeeded - 1)) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else if (ActiveThreads > 1) // Partial warp but contiguous lanes
// Only SPMD execution mode comes thru this case.
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/ActiveThreads % WARPSIZE,
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
if (ActiveThreads > WARPSIZE) {
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#else
if (ThreadId >= NumTeams)
return 0;
// Load from scratchpad and reduce.
char *scratchpad = GetTeamsReductionScratchpad();
ldFct(reduce_data, scratchpad, ThreadId, NumTeams, /*Load only*/ 0);
for (uint32_t i = NumThreads + ThreadId; i < NumTeams; i += NumThreads)
ldFct(reduce_data, scratchpad, i, NumTeams, /*Load and reduce*/ 1);
// Reduce across warps to the warp master.
uint32_t Liveness = __ACTIVEMASK();
if (Liveness == 0xffffffff) // Full warp
gpu_regular_warp_reduce(reduce_data, shflFct);
else // Partial warp but contiguous lanes
gpu_irregular_warp_reduce(reduce_data, shflFct,
/*LaneCount=*/__popc(Liveness),
/*LaneId=*/ThreadId % WARPSIZE);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = NumTeams < NumThreads ? NumTeams : NumThreads;
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded, ThreadId);
}
#endif // __CUDA_ARCH__ >= 700
return ThreadId == 0;
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait(int32_t global_tid, int32_t num_vars,
size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct,
kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, isSPMDMode());
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_spmd(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/true);
}
EXTERN
int32_t __kmpc_nvptx_teams_reduce_nowait_simple_generic(
int32_t global_tid, int32_t num_vars, size_t reduce_size, void *reduce_data,
kmp_ShuffleReductFctPtr shflFct, kmp_InterWarpCopyFctPtr cpyFct,
kmp_CopyToScratchpadFctPtr scratchFct, kmp_LoadReduceFctPtr ldFct) {
return nvptx_teams_reduce_nowait(global_tid, num_vars, reduce_size,
reduce_data, shflFct, cpyFct, scratchFct,
ldFct, /*isSPMDExecutionMode=*/false);
}
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_simple(kmp_Ident *loc,
int32_t global_tid,
kmp_CriticalName *crit) {
if (checkSPMDMode(loc) && GetThreadIdInBlock() != 0)
return 0;
// The master thread of the team actually does the reduction.
while (atomicCAS((uint32_t *)crit, 0, 1))
;
return 1;
}
EXTERN void
__kmpc_nvptx_teams_end_reduce_nowait_simple(kmp_Ident *loc, int32_t global_tid,
kmp_CriticalName *crit) {
__threadfence_system();
(void)atomicExch((uint32_t *)crit, 0);
}
INLINE static bool isMaster(kmp_Ident *loc, uint32_t ThreadId) {
return checkGenericMode(loc) || IsTeamMaster(ThreadId);
}
INLINE static uint32_t roundToWarpsize(uint32_t s) {
if (s < WARPSIZE)
return 1;
return (s & ~(unsigned)(WARPSIZE - 1));
}
__device__ static volatile uint32_t IterCnt = 0;
__device__ static volatile uint32_t Cnt = 0;
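// Coordination state for the buffered teams reduction below: IterCnt is the
// lowest team id of the chunk of teams currently allowed to write into the
// global buffer, and Cnt counts how many teams of that chunk have already
// deposited their partial result.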
EXTERN int32_t __kmpc_nvptx_teams_reduce_nowait_v2(
kmp_Ident *loc, int32_t global_tid, void *global_buffer,
int32_t num_of_records, void *reduce_data, kmp_ShuffleReductFctPtr shflFct,
kmp_InterWarpCopyFctPtr cpyFct, kmp_ListGlobalFctPtr lgcpyFct,
kmp_ListGlobalFctPtr lgredFct, kmp_ListGlobalFctPtr glcpyFct,
kmp_ListGlobalFctPtr glredFct) {
// Terminate all threads in non-SPMD mode except for the master thread.
if (checkGenericMode(loc) && GetThreadIdInBlock() != GetMasterThreadID())
return 0;
uint32_t ThreadId = GetLogicalThreadIdInBlock(checkSPMDMode(loc));
// In non-generic mode all workers participate in the teams reduction.
// In generic mode only the team master participates in the teams
// reduction because the workers are waiting for parallel work.
uint32_t NumThreads =
checkSPMDMode(loc) ? GetNumberOfOmpThreads(/*isSPMDExecutionMode=*/true)
: /*Master thread only*/ 1;
uint32_t TeamId = GetBlockIdInKernel();
uint32_t NumTeams = GetNumberOfBlocksInKernel();
__shared__ unsigned Bound;
__shared__ unsigned ChunkTeamCount;
// Block progress for teams greater than the current upper
// limit. We always only allow a number of teams less or equal
// to the number of slots in the buffer.
bool IsMaster = isMaster(loc, ThreadId);
while (IsMaster) {
// Atomic read
Bound = atomicAdd((uint32_t *)&IterCnt, 0);
if (TeamId < Bound + num_of_records)
break;
}
if (IsMaster) {
int ModBockId = TeamId % num_of_records;
if (TeamId < num_of_records)
lgcpyFct(global_buffer, ModBockId, reduce_data);
else
lgredFct(global_buffer, ModBockId, reduce_data);
__threadfence_system();
// Increment team counter.
// This counter is incremented by all teams in the current
// BUFFER_SIZE chunk.
ChunkTeamCount = atomicInc((uint32_t *)&Cnt, num_of_records - 1);
}
// Synchronize
if (checkSPMDMode(loc))
__kmpc_barrier(loc, global_tid);
// reduce_data is global or shared so before being reduced within the
// warp we need to bring it in local memory:
// local_reduce_data = reduce_data[i]
//
// Example for 3 reduction variables a, b, c (of potentially different
// types):
//
// buffer layout (struct of arrays):
// a, a, ..., a, b, b, ... b, c, c, ... c
// |__________|
// num_of_records
//
// local_data_reduce layout (struct):
// a, b, c
//
// Each thread will have a local struct containing the values to be
// reduced:
// 1. do reduction within each warp.
// 2. do reduction across warps.
// 3. write the final result to the main reduction variable
// by returning 1 in the thread holding the reduction result.
// Check if this is the very last team.
unsigned NumRecs = min(NumTeams, num_of_records);
if (ChunkTeamCount == NumTeams - Bound - 1) {
//
// Last team processing.
//
if (ThreadId >= NumRecs)
return 0;
NumThreads = roundToWarpsize(min(NumThreads, NumRecs));
if (ThreadId >= NumThreads)
return 0;
// Load from buffer and reduce.
glcpyFct(global_buffer, ThreadId, reduce_data);
for (uint32_t i = NumThreads + ThreadId; i < NumRecs; i += NumThreads)
glredFct(global_buffer, i, reduce_data);
// Reduce across warps to the warp master.
if (NumThreads > 1) {
gpu_regular_warp_reduce(reduce_data, shflFct);
// When we have more than [warpsize] number of threads
// a block reduction is performed here.
uint32_t ActiveThreads = min(NumRecs, NumThreads);
if (ActiveThreads > WARPSIZE) {
uint32_t WarpsNeeded = (ActiveThreads + WARPSIZE - 1) / WARPSIZE;
// Gather all the reduced values from each warp
// to the first warp.
cpyFct(reduce_data, WarpsNeeded);
uint32_t WarpId = ThreadId / WARPSIZE;
if (WarpId == 0)
gpu_irregular_warp_reduce(reduce_data, shflFct, WarpsNeeded,
ThreadId);
}
}
if (IsMaster) {
Cnt = 0;
IterCnt = 0;
return 1;
}
return 0;
}
if (IsMaster && ChunkTeamCount == num_of_records - 1) {
// Allow SIZE number of teams to proceed writing their
// intermediate results to the global buffer.
atomicAdd((uint32_t *)&IterCnt, num_of_records);
}
return 0;
}
|
70acf4dcc62a6a3b55ee68ba4b0e1d173096c35d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#ifdef __NVCC__
#include <hipcub/hipcub.hpp>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/math/depthwise_conv.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val, int warp_size) {
typedef hipcub::WarpReduce<T> WarpReduce;
typename WarpReduce::TempStorage temp_storage;
val = WarpReduce(temp_storage).Sum(val, warp_size);
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduceSum(T val) {
static __shared__ T shared[32];
int thread_id = threadIdx.x + threadIdx.y * blockDim.x +
threadIdx.z * blockDim.x * blockDim.y;
int warp_size = min(blockDim.x * blockDim.y * blockDim.z, warpSize);
int lane = thread_id % warp_size;
int wid = thread_id / warp_size;
val = WarpReduceSum(val, warp_size); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
int block_size = blockDim.x * blockDim.y * blockDim.z;
if (thread_id < (block_size - 1) / warp_size + 1) {
val = shared[lane];
} else {
val = static_cast<T>(0);
}
if (wid == 0) {
val = WarpReduceSum(val, warp_size); // Final reduce within first warp
}
__syncthreads();
if (thread_id != 0) {
val = static_cast<T>(0);
}
return val;
}
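// Illustrative usage sketch (a hypothetical kernel, not part of the original
// PaddlePaddle source): shows how BlockReduceSum is typically consumed. Only
// thread 0 of the block ends up holding the non-zero block total, so it
// alone writes the result. Assumes a 1-D launch where `in` has
// gridDim.x * blockDim.x elements and `out` has gridDim.x elements.
template <typename T>
__global__ void BlockReduceSumUsageSketch(const T* in, T* out) {
  T val = in[blockIdx.x * blockDim.x + threadIdx.x];
  T block_total = BlockReduceSum(val);  // non-zero only in thread 0
  if (threadIdx.x == 0) {
    out[blockIdx.x] = block_total;
  }
}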
#define ARG_DEFINE_KernelDepthwiseConv \
const T *const input_data, const T *const filter_data, const int batch_size, \
const int output_channels, const int output_height, \
const int output_width, const int input_channels, \
const int input_height, const int input_width, \
const int filter_multiplier, const int filter_height, \
const int filter_width, const int stride_height, const int stride_width, \
const int padding_height, const int padding_width, \
const int dilate_height, const int dilate_width, T *const output_data
// A Cuda kernel to compute the depthwise convolution forward pass
// in NCHW format.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvNCHW(
ARG_DEFINE_KernelDepthwiseConv) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= (output_channels * batch_size * output_height * output_width))
return;
const int w_out = idx % output_width;
const int h_out = (idx / output_width) % output_height;
const int c_out = (idx / output_width / output_height) % output_channels;
const int batch = idx / output_width / output_height / output_channels;
const int c_in = c_out / filter_multiplier;
const T* weight = filter_data + c_out * filter_height * filter_width;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + filter_height * dilate_height;
const int w_in_end = w_in_start + filter_width * dilate_width;
int in_offset =
((batch * input_channels + c_in) * input_height) * input_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
int weight_offset = 0;
#pragma unroll
for (int h_in = h_in_start; h_in < h_in_end; h_in += dilate_height) {
#pragma unroll
for (int w_in = w_in_start; w_in < w_in_end; w_in += dilate_width) {
if (h_in >= h_start && h_in < h_end && w_in >= w_start && w_in < w_end) {
int offset = in_offset + h_in * input_width + w_in;
T in_data = input_data[offset];
if (fuse_relu_before_conv) {
value += weight[weight_offset] * max(0.0f, in_data);
} else {
value += weight[weight_offset] * in_data;
}
}
weight_offset++;
}
}
int index = batch * output_channels * output_height * output_width +
c_out * output_height * output_width + h_out * output_width +
w_out;
output_data[index] = value;
}
// A Cuda kernel to compute the depthwise convolution forward pass
// in NHWC format.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvNHWC(
ARG_DEFINE_KernelDepthwiseConv) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= (output_channels * batch_size * output_height * output_width))
return;
const int c_out = idx % output_channels;
const int w_out = (idx / output_channels) % output_width;
const int h_out = (idx / output_channels / output_width) % output_height;
const int batch = idx / output_width / output_height / output_channels;
const int c_in = c_out / filter_multiplier;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + filter_height * dilate_height;
const int w_in_end = w_in_start + filter_width * dilate_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
int weight_offset = 0;
#pragma unroll
for (int h_in = h_in_start; h_in < h_in_end; h_in += dilate_height) {
#pragma unroll
for (int w_in = w_in_start; w_in < w_in_end; w_in += dilate_width) {
if (h_in >= h_start && h_in < h_end && w_in >= w_start && w_in < w_end) {
int offset = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
T in_data = input_data[offset];
const T* weight = filter_data + weight_offset * output_channels + c_out;
if (fuse_relu_before_conv) {
value += weight[0] * max(0.0f, in_data);
} else {
value += weight[0] * in_data;
}
}
weight_offset++;
}
}
int index = batch * output_channels * output_height * output_width +
h_out * output_width * output_channels + w_out * output_channels +
c_out;
output_data[index] = value;
}
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvCFilterNCHW(
ARG_DEFINE_KernelDepthwiseConv) {
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const T* weight = filter_data + c_out * c_filter * c_filter;
for (int i = 0; i < c_filter * c_filter; i++) r_weight[i] = weight[i];
for (int w_out = threadIdx.x; w_out < output_width; w_out += blockDim.x) {
for (int h_out = threadIdx.y; h_out < output_height; h_out += blockDim.y) {
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const int c_in = c_out / filter_multiplier;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + c_filter * dilate_height;
const int w_in_end = w_in_start + c_filter * dilate_width;
int in_offset =
((batch * input_channels + c_in) * input_height) * input_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
for (int h_in = h_in_start, h_f = 0; h_f < c_filter;
h_in += dilate_height, h_f++) {
for (int w_in = w_in_start, w_f = 0; w_f < c_filter;
w_in += dilate_width, w_f++) {
if (h_in >= 0 && h_in < input_height && w_in >= 0 &&
w_in < input_width) {
int offset = in_offset + h_in * input_width + w_in;
if (fuse_relu_before_conv) {
value += r_weight[h_f * c_filter + w_f] *
max(0.0f, input_data[offset]);
} else {
value += r_weight[h_f * c_filter + w_f] * input_data[offset];
}
}
}
}
int index =
((batch * gridDim.x + c_out) * output_height + h_out) * output_width +
w_out;
output_data[index] = value;
}
}
}
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvCFilterNHWC(
ARG_DEFINE_KernelDepthwiseConv) {
const int batch = blockIdx.z;
int h_out = blockIdx.x * dilate_height + blockIdx.y;
if (h_out >= output_height) {
return;
}
int in_offset = batch * input_height * input_width * input_channels;
int out_offset =
(batch * output_height + h_out) * output_width * output_channels;
const int h_in_start = -padding_height + h_out * stride_height;
const int wi_size = (output_width + dilate_width - 1) / dilate_width;
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
for (int c_out = threadIdx.x; c_out < output_channels; c_out += blockDim.x) {
for (int i = 0; i < c_filter * c_filter; i++) {
const T* weight = filter_data + i * output_channels + c_out;
r_weight[i] = weight[0];
}
const int c_in = c_out / filter_multiplier;
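    // The i loop walks the output width in a dilate-interleaved order:
    // i encodes the pair (i_dw, i_wi) and maps to
    // w_out = i_wi * dilate_width + i_dw, so the wi_size * dilate_width
    // iterations cover every output column (out-of-range w_out is skipped).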
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int w_out = i_wi * dilate_width + i_dw;
if (w_out >= output_width) {
continue;
}
T value = 0;
const int w_in_start = -padding_width + w_out * stride_width;
for (int h_in = h_in_start, h_f = 0; h_f < c_filter;
h_in += dilate_height, h_f++) {
for (int w_in = w_in_start, w_f = 0; w_f < c_filter;
w_in += dilate_width, w_f++) {
if (h_in >= 0 && h_in < input_height && w_in >= 0 &&
w_in < input_width) {
int offset =
in_offset + (h_in * input_width + w_in) * input_channels + c_in;
if (fuse_relu_before_conv) {
value += r_weight[h_f * c_filter + w_f] *
max(0.0f, input_data[offset]);
} else {
value += r_weight[h_f * c_filter + w_f] * input_data[offset];
}
}
}
}
int index = out_offset + w_out * output_channels + c_out;
output_data[index] = value;
}
}
}
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvSp(ARG_DEFINE_KernelDepthwiseConv) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvNCHW<T, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
} else {
KernelDepthwiseConvNHWC<T, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvCFilterNCHW<T, c_filter, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
} else {
KernelDepthwiseConvCFilterNHWC<T, c_filter, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
}
}
}
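// Dispatch summary for KernelDepthwiseConvSp: c_filter == -1 selects the
// generic NCHW/NHWC kernels (one thread per output element), while a
// positive c_filter selects the register-cached CFilter variants;
// c_filter_multiplier and c_stride likewise replace the runtime
// filter_multiplier and strides when they are non-zero.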
// CUDA kernel to compute the depthwise convolution backprop w.r.t input.
#define ARG_DEFINE_KernelDepthwiseConvInputGrad \
const T *const input_data, const T *const output_grad_data, \
const T *const filter_data, const int batch_size, \
const int output_channels, const int output_height, \
const int output_width, const int input_channels, \
const int input_height, const int input_width, \
const int filter_multiplier, const int filter_height, \
const int filter_width, const int stride_height, const int stride_width, \
const int padding_height, const int padding_width, \
const int dilate_height, const int dilate_width, \
T *const input_grad_data
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradNCHW(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
for (int w_in = threadIdx.x; w_in < input_width; w_in += blockDim.x) {
for (int h_in = threadIdx.y; h_in < input_height; h_in += blockDim.y) {
const int c_out_start = c_in * filter_multiplier;
int h_out_start =
h_in - (filter_height - 1) * dilate_height + padding_height;
int h_out_end = h_in + padding_height;
int w_out_start =
w_in - (filter_width - 1) * dilate_width + padding_width;
int w_out_end = w_in + padding_width;
T value = 0;
int index =
((batch * gridDim.x + c_in) * input_height + h_in) * input_width +
w_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
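      // The filter is traversed in reverse (filter_offset starts one past
      // the end of this output channel's weights and is pre-decremented),
      // i.e. the input gradient correlates output_grad with the flipped
      // kernel.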
for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier;
c_out++) {
int filter_offset = (c_out + 1) * filter_height * filter_width;
for (int h_out = h_out_start; h_out <= h_out_end;
h_out += dilate_height) {
for (int w_out = w_out_start; w_out <= w_out_end;
w_out += dilate_width) {
filter_offset--;
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_channels + c_out) * output_height +
s_h_out) *
output_width +
s_w_out;
value += output_grad_data[output_grad_offset] *
filter_data[filter_offset];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradNHWC(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int batch = blockIdx.z;
int h_in = blockIdx.x * dilate_height + blockIdx.y;
if (h_in >= input_height) {
return;
}
for (int c_in = threadIdx.x; c_in < input_channels; c_in += blockDim.x) {
for (int w_in = threadIdx.y; w_in < input_width; w_in += blockDim.y) {
int h_out_start =
h_in - (filter_height - 1) * dilate_height + padding_height;
int w_out_start =
w_in - (filter_width - 1) * dilate_width + padding_width;
T value = 0;
int index = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
int weight_offset = filter_height * filter_width;
for (int h_out = h_out_start, h_f = 0; h_f < filter_height;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < filter_width;
w_out += dilate_width, w_f++) {
weight_offset--;
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_height + s_h_out) * output_width + s_w_out) *
output_channels +
c_out;
int filter_offset = weight_offset * output_channels + c_out;
value += output_grad_data[output_grad_offset] *
filter_data[filter_offset];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, int c_filter, int c_filter_multiplier,
bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradCFilterNCHW(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int kWeightSize = c_filter * c_filter * c_filter_multiplier + 1;
T r_weight[kWeightSize];
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
const T* weight = filter_data + c_out * c_filter * c_filter;
for (int i = 0; i < c_filter * c_filter; i++)
r_weight[i + c_i * c_filter * c_filter] =
weight[c_filter * c_filter - i - 1];
}
for (int w_in = threadIdx.x; w_in < input_width; w_in += blockDim.x) {
for (int h_in = threadIdx.y; h_in < input_height; h_in += blockDim.y) {
int h_out_start = h_in - (c_filter - 1) * dilate_height + padding_height;
int w_out_start = w_in - (c_filter - 1) * dilate_width + padding_width;
T value = 0;
int index =
((batch * gridDim.x + c_in) * input_height + h_in) * input_width +
w_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
for (int h_out = h_out_start, h_f = 0; h_f < c_filter;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < c_filter;
w_out += dilate_width, w_f++) {
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_channels + c_out) * output_height +
s_h_out) *
output_width +
s_w_out;
value +=
output_grad_data[output_grad_offset] *
r_weight[h_f * c_filter + w_f + c_i * c_filter * c_filter];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, int c_filter, int c_filter_multiplier,
bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradCFilterNHWC(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
int h_in = blockIdx.x * dilate_height + blockIdx.y;
if (h_in >= input_height) {
return;
}
const int kWeightSize = c_filter * c_filter * c_filter_multiplier + 1;
T r_weight[kWeightSize];
const int batch = blockIdx.z;
const int wi_size = (input_width + dilate_width - 1) / dilate_width;
const int h_out_start =
h_in - (c_filter - 1) * dilate_height + padding_height;
for (int c_in = threadIdx.x; c_in < input_channels; c_in += blockDim.x) {
for (int c_i = 0; c_i < c_filter_multiplier; c_i++) {
int c_out = c_in * c_filter_multiplier + c_i;
for (int i = 0; i < c_filter * c_filter; i++)
r_weight[i + c_i * c_filter * c_filter] =
filter_data[(c_filter * c_filter - i - 1) * output_channels +
c_out];
}
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int w_in = i_wi * dilate_width + i_dw;
if (w_in >= input_width) {
continue;
}
int w_out_start = w_in - (c_filter - 1) * dilate_width + padding_width;
T value = 0;
int index = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < c_filter_multiplier; c_i++) {
int c_out = c_in * c_filter_multiplier + c_i;
for (int h_out = h_out_start, h_f = 0; h_f < c_filter;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < c_filter;
w_out += dilate_width, w_f++) {
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_height + s_h_out) * output_width + s_w_out) *
output_channels +
c_out;
value +=
output_grad_data[output_grad_offset] *
r_weight[h_f * c_filter + w_f + c_i * c_filter * c_filter];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvInputGradSp(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter_multiplier == 0 || c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvInputGradNCHW<T, fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, final_filter_multiplier, filter_height,
filter_width, h_stride, w_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
} else {
KernelDepthwiseConvInputGradNHWC<T, fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, final_filter_multiplier, filter_height,
filter_width, h_stride, w_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvInputGradCFilterNCHW<T, c_filter, c_filter_multiplier,
fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, c_filter_multiplier, filter_height,
filter_width, c_stride, c_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
} else {
KernelDepthwiseConvInputGradCFilterNHWC<T, c_filter, c_filter_multiplier,
fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, c_filter_multiplier, filter_height,
filter_width, c_stride, c_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
}
}
}
// Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradNCHW(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
T s = 0;
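  // gbid flattens the (filter_w, filter_h, out_channel) coordinates into a
  // linear filter-element index; the NCHW filter-grad launch below uses
  // grid = (ksize_width, ksize_height, output_channels).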
int gbid = ((blockIdx.z * gridDim.y) + blockIdx.y) * gridDim.x + blockIdx.x;
for (int image_w = threadIdx.x; image_w < output_width;
image_w += blockDim.x) {
for (int bid = 0; bid < num; bid++) {
for (int image_h = threadIdx.y; image_h < output_height;
image_h += blockDim.y) {
int kernel_id = blockIdx.z;
int kernel_h = blockIdx.y * dilate_height - padding_height;
int kernel_w = blockIdx.x * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
#define gaid(N, C, H, W) \
((((N)*gridDim.z + (C)) * output_height + (H)) * output_width + (W))
int input_id = ((bid * (gridDim.z / filter_multiplier) +
kernel_id / filter_multiplier) *
input_height +
image_hk) *
input_width +
image_wk;
if (fuse_relu_before_conv) {
s += output_grad_data[gaid(bid, kernel_id, image_h, image_w)] *
max(0.0f, input_data[input_id]);
} else {
s += output_grad_data[gaid(bid, kernel_id, image_h, image_w)] *
input_data[input_id];
}
#undef gaid
}
}
}
T val = BlockReduceSum(s);
platform::CudaAtomicAdd(&filter_grad_data[gbid], val);
}
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradNHWC(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
int bid = blockIdx.z;
int image_h = blockIdx.y;
int kernel_iw = blockIdx.x % filter_width;
int kernel_ih = blockIdx.x / filter_width;
for (int kernel_id = threadIdx.x; kernel_id < output_channels;
kernel_id += blockDim.x) {
T s = 0;
int gbid =
((kernel_id * filter_height) + kernel_ih) * filter_width + kernel_iw;
for (int image_w = threadIdx.y; image_w < output_width;
image_w += blockDim.y) {
int kernel_h = kernel_ih * dilate_height - padding_height;
int kernel_w = kernel_iw * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
#define gaid(N, H, W, C) \
((((N)*output_height + (H)) * output_width + (W)) * output_channels + (C))
int input_id =
((bid * input_height + image_hk) * input_width + image_wk) *
input_channels +
kernel_id / filter_multiplier;
if (fuse_relu_before_conv) {
s += output_grad_data[gaid(bid, image_h, image_w, kernel_id)] *
max(0.0f, input_data[input_id]);
} else {
s += output_grad_data[gaid(bid, image_h, image_w, kernel_id)] *
input_data[input_id];
}
#undef gaid
}
platform::CudaAtomicAdd(&filter_grad_data[gbid], s);
}
}
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradCFilterNHWC(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
const int bid = blockIdx.z;
int image_h = blockIdx.x * dilate_height + blockIdx.y;
if (image_h >= output_height) {
return;
}
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
const int wi_size = (output_width + dilate_width - 1) / dilate_width;
for (int kernel_id = threadIdx.x; kernel_id < output_channels;
kernel_id += blockDim.x) {
for (int i = 0; i < c_filter * c_filter; ++i) {
r_weight[i] = 0;
}
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int image_w = i_wi * dilate_width + i_dw;
if (image_w >= output_width) {
continue;
}
for (int kernel_ih = 0; kernel_ih < c_filter; ++kernel_ih) {
for (int kernel_iw = 0; kernel_iw < c_filter; ++kernel_iw) {
int kernel_h = kernel_ih * dilate_height - padding_height;
int kernel_w = kernel_iw * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
int input_id =
((bid * input_height + image_hk) * input_width + image_wk) *
input_channels +
kernel_id / filter_multiplier;
int output_id =
((bid * output_height + image_h) * output_width + image_w) *
output_channels +
kernel_id;
T s = 0;
if (fuse_relu_before_conv) {
s = output_grad_data[output_id] * max(0.0f, input_data[input_id]);
} else {
s = output_grad_data[output_id] * input_data[input_id];
}
r_weight[kernel_ih * c_filter + kernel_iw] += s;
}
}
}
for (int i = 0; i < c_filter * c_filter; ++i) {
T* weight = filter_grad_data + i * output_channels + kernel_id;
platform::CudaAtomicAdd(&weight[0], r_weight[i]);
}
}
}
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvFilterGradSp(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter_multiplier == 0 || c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvFilterGradNCHW<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
} else {
KernelDepthwiseConvFilterGradNHWC<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvFilterGradNCHW<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
} else {
KernelDepthwiseConvFilterGradCFilterNHWC<T, c_filter,
fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
}
}
}
/*
 * Tensors are in NCHW or NHWC format, selected by the data_layout argument.
 * Ksize, strides and paddings each hold two elements, representing the
 * height and width dimensions respectively.
 */
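// For reference (an assumption about the caller, not enforced here): the
// output spatial sizes are expected to follow the usual convolution relation
//   output_height = (input_height + 2 * padding_height -
//                    (dilate_height * (filter_height - 1) + 1)) / stride_height + 1
// and similarly for output_width.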
template <class T, bool fuse_relu_before_conv>
class DepthwiseConvFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations, framework::Tensor* output,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output->dims()[1]
: output->dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output->dims()[2]
: output->dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output->dims()[3]
: output->dims()[2]);
const int ksize_height = filter.dims()[2];
const int ksize_width = filter.dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* filter_data = filter.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
framework::Tensor filter_hwc;
if (data_layout == DataLayout::kNHWC) {
framework::DDim filter_hwc_dims({filter.dims()[2], filter.dims()[3],
filter.dims()[0], filter.dims()[1]});
filter_hwc.Resize(filter_hwc_dims);
filter_hwc.mutable_data<T>(context.GetPlace());
std::vector<int> perm_axis({2, 3, 0, 1});
math::TransposeNormal<platform::CUDADeviceContext, T> trans;
trans(context, filter, &filter_hwc, perm_axis);
filter_data = filter_hwc.data<T>();
}
int thread = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (output_width > 1024 && output_width <= 2048)
thread = (output_width - 1) / 2 + 1;
else if (output_width > 512 && output_width <= 1024)
thread = output_width;
#ifdef __HIPCC__
thread = ::min(thread, 256);
#endif
blocks = ::min(::max(thread / output_width, 1), output_height);
threads = dim3(::min(output_width, thread), blocks, 1);
grid = dim3(output_channels, batch_size, 1);
} else {
#ifdef __HIPCC__
thread = ::min(thread, 256);
#endif
blocks = ::min(
::max(thread / output_channels, 1),
((output_width + dilate_width - 1) / dilate_width) * dilate_width);
threads = dim3(::min(output_channels, thread), blocks, 1);
grid = dim3((output_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
}
int filter_multiplier = output_channels / input_channels;
int nums_output =
batch_size * output_channels * output_height * output_width;
#ifdef __HIPCC__
int block_size = 256;
#else
int block_size = 512;
#endif
int grid_size = (nums_output + block_size - 1) / block_size;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (c_filter == -1) { \
threads.x = block_size; \
grid.x = grid_size; \
threads.y = threads.z = grid.y = grid.z = 1; \
} \
if (data_layout != DataLayout::kNHWC) { \
hipLaunchKernelGGL(( KernelDepthwiseConvSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
input_data, filter_data, batch_size, output_channels, output_height, \
output_width, input_channels, input_height, input_width, \
filter_multiplier, ksize_height, ksize_width, stride_height, \
stride_width, padding_height, padding_width, dilate_height, \
dilate_width, output_data); \
} else { \
hipLaunchKernelGGL(( KernelDepthwiseConvSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
input_data, filter_data, batch_size, output_channels, output_height, \
output_width, input_channels, input_height, input_width, \
filter_multiplier, ksize_height, ksize_width, stride_height, \
stride_width, padding_height, padding_width, dilate_height, \
dilate_width, output_data); \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
// NOTE(liangdun): 0,0 for other case
// add other case if needed, e.g. check_case(2^n,1)
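    // Example of the dispatch above (illustrative): a depthwise conv with
    // filter_multiplier == 1, stride 1x1 and a 3x3 filter matches
    // check_case(1, 1, 3), so the c_filter == 3 specialization of
    // KernelDepthwiseConvSp is launched (taking the register-cached CFilter
    // path); anything not matched earlier falls through to the fully
    // generic check_case(0, 0, -1).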
#undef check_case
}
};
template <typename T, bool fuse_relu_before_conv>
class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& filter,
const framework::Tensor& output_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
framework::Tensor* input_grad,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[1]
: output_grad.dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[2]
: output_grad.dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[3]
: output_grad.dims()[2]);
const int ksize_height = filter.dims()[2];
const int ksize_width = filter.dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* filter_data = filter.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
framework::Tensor filter_hwc;
if (data_layout == DataLayout::kNHWC) {
framework::DDim filter_hwc_dims({filter.dims()[2], filter.dims()[3],
filter.dims()[0], filter.dims()[1]});
filter_hwc.Resize(filter_hwc_dims);
filter_hwc.mutable_data<T>(context.GetPlace());
std::vector<int> perm_axis({2, 3, 0, 1});
math::TransposeNormal<platform::CUDADeviceContext, T> trans;
trans(context, filter, &filter_hwc, perm_axis);
filter_data = filter_hwc.data<T>();
}
int thread = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (input_width > 1024 && input_width <= 2048) {
thread = (input_width - 1) / 2 + 1;
} else if (input_width > 512 && input_width <= 1024) {
thread = input_width;
}
blocks = ::min(::max(thread / input_width, 1), input_height);
threads = dim3(::min(input_width, thread), blocks, 1);
grid = dim3(input_channels, batch_size, 1);
} else {
blocks = ::min(
::max(thread / input_channels, 1),
((input_width + dilate_width - 1) / dilate_width) * dilate_width);
threads = dim3(::min(input_channels, thread), blocks, 1);
grid = dim3((input_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
}
int filter_multiplier = output_channels / input_channels;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (data_layout != DataLayout::kNHWC) { \
hipLaunchKernelGGL(( KernelDepthwiseConvInputGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
input_data, output_grad_data, filter_data, batch_size, \
output_channels, output_height, output_width, input_channels, \
input_height, input_width, filter_multiplier, ksize_height, \
ksize_width, stride_height, stride_width, padding_height, \
padding_width, dilate_height, dilate_width, input_grad_data); \
} else { \
hipLaunchKernelGGL(( KernelDepthwiseConvInputGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
input_data, output_grad_data, filter_data, batch_size, \
output_channels, output_height, output_width, input_channels, \
input_height, input_width, filter_multiplier, ksize_height, \
ksize_width, stride_height, stride_width, padding_height, \
padding_width, dilate_height, dilate_width, input_grad_data); \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
// NOTE(liangdun): 0,0 for other case
// add other case if needed, e.g. check_case(2^n,1)
#undef check_case
}
};
template <typename T, bool fuse_relu_before_conv>
class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
framework::Tensor* filter_grad,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[1]
: output_grad.dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[2]
: output_grad.dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[3]
: output_grad.dims()[2]);
const int ksize_height = filter_grad->dims()[2];
const int ksize_width = filter_grad->dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace());
int block_size = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (output_width > 1024 && output_width <= 2048) {
block_size = (output_width - 1) / 2 + 1;
} else if (output_width > 512 && output_width <= 1024) {
block_size = output_width;
}
blocks = ::min(::max(block_size / output_width, 1), output_height);
grid = dim3(ksize_width, ksize_height, output_channels);
threads = dim3(::min(output_width, block_size), blocks, 1);
} else {
blocks = ::min(
::max(block_size / output_channels, 1),
((output_width + dilate_width - 1) / dilate_width) * dilate_width);
grid = dim3((output_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
threads = dim3(::min(output_channels, block_size), blocks, 1);
}
int filter_multiplier = output_channels / input_channels;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (data_layout != DataLayout::kNHWC) { \
hipLaunchKernelGGL(( KernelDepthwiseConvFilterGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
output_grad_data, input_data, batch_size, output_channels, \
output_height, output_width, input_channels, input_height, \
input_width, filter_multiplier, ksize_height, ksize_width, \
stride_height, stride_width, padding_height, padding_width, \
dilate_height, dilate_width, filter_grad_data); \
} else { \
framework::Tensor filter_grad_hwc; \
if (c_filter != -1) { \
framework::DDim filter_grad_hwc_dims( \
{filter_grad->dims()[2], filter_grad->dims()[3], \
filter_grad->dims()[0], filter_grad->dims()[1]}); \
filter_grad_hwc.Resize(filter_grad_hwc_dims); \
filter_grad_hwc.mutable_data<T>(context.GetPlace()); \
math::SetConstant<platform::CUDADeviceContext, T> set_zero; \
set_zero(context, &filter_grad_hwc, static_cast<T>(0)); \
filter_grad_data = filter_grad_hwc.data<T>(); \
} else { \
block_size = 512; \
if (output_channels > 1024 && output_channels <= 2048) { \
block_size = (output_channels - 1) / 2 + 1; \
} else if (output_channels > 512 && output_channels <= 1024) { \
block_size = output_channels; \
} \
blocks = \
::min(::max(block_size / output_channels, 1), output_width); \
grid = dim3(ksize_width * ksize_height, output_height, batch_size); \
threads = dim3(::min(output_channels, block_size), blocks, 1); \
} \
hipLaunchKernelGGL(( KernelDepthwiseConvFilterGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv>), dim3(grid), dim3(threads), 0, context.stream(), \
output_grad_data, input_data, batch_size, output_channels, \
output_height, output_width, input_channels, input_height, \
input_width, filter_multiplier, ksize_height, ksize_width, \
stride_height, stride_width, padding_height, padding_width, \
dilate_height, dilate_width, filter_grad_data); \
if (c_filter != -1) { \
std::vector<int> perm_axis({2, 3, 0, 1}); \
math::TransposeNormal<platform::CUDADeviceContext, T> trans; \
trans(context, filter_grad_hwc, filter_grad, perm_axis); \
} \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
#undef check_case
}
};
template class DepthwiseConvFunctor<platform::CUDADeviceContext, float, false>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, double, false>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float,
false>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext,
double, false>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
float, false>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
double, false>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, float, true>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, double, true>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float,
true>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext,
double, true>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
float, true>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
double, true>;
} // namespace math
} // namespace operators
} // namespace paddle
| 70acf4dcc62a6a3b55ee68ba4b0e1d173096c35d.cu | /* Copyright (c) 2016 paddlepaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <vector>
#ifdef __NVCC__
#include <cub/cub.cuh>
#endif
#ifdef __HIPCC__
#include <hipcub/hipcub.hpp>
namespace cub = hipcub;
#endif
#include "paddle/fluid/operators/math/depthwise_conv.h"
#include "paddle/fluid/operators/math/math_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_device_function.h"
#include "paddle/fluid/platform/device/gpu/gpu_primitives.h"
namespace paddle {
namespace operators {
namespace math {
template <typename T>
static __forceinline__ __device__ T WarpReduceSum(T val, int warp_size) {
typedef cub::WarpReduce<T> WarpReduce;
typename WarpReduce::TempStorage temp_storage;
val = WarpReduce(temp_storage).Sum(val, warp_size);
return val;
}
template <typename T>
__forceinline__ __device__ T BlockReduceSum(T val) {
static __shared__ T shared[32];
int thread_id = threadIdx.x + threadIdx.y * blockDim.x +
threadIdx.z * blockDim.x * blockDim.y;
int warp_size = min(blockDim.x * blockDim.y * blockDim.z, warpSize);
int lane = thread_id % warp_size;
int wid = thread_id / warp_size;
val = WarpReduceSum(val, warp_size); // Each warp performs partial reduction
if (lane == 0) shared[wid] = val; // Write reduced value to shared memory
__syncthreads(); // Wait for all partial reductions
// read from shared memory only if that warp existed
int block_size = blockDim.x * blockDim.y * blockDim.z;
if (thread_id < (block_size - 1) / warp_size + 1) {
val = shared[lane];
} else {
val = static_cast<T>(0);
}
if (wid == 0) {
val = WarpReduceSum(val, warp_size); // Final reduce within first warp
}
__syncthreads();
if (thread_id != 0) {
val = static_cast<T>(0);
}
return val;
}
#define ARG_DEFINE_KernelDepthwiseConv \
const T *const input_data, const T *const filter_data, const int batch_size, \
const int output_channels, const int output_height, \
const int output_width, const int input_channels, \
const int input_height, const int input_width, \
const int filter_multiplier, const int filter_height, \
const int filter_width, const int stride_height, const int stride_width, \
const int padding_height, const int padding_width, \
const int dilate_height, const int dilate_width, T *const output_data
// A Cuda kernel to compute the depthwise convolution forward pass
// in NCHW format.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvNCHW(
ARG_DEFINE_KernelDepthwiseConv) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= (output_channels * batch_size * output_height * output_width))
return;
const int w_out = idx % output_width;
const int h_out = (idx / output_width) % output_height;
const int c_out = (idx / output_width / output_height) % output_channels;
const int batch = idx / output_width / output_height / output_channels;
const int c_in = c_out / filter_multiplier;
const T* weight = filter_data + c_out * filter_height * filter_width;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + filter_height * dilate_height;
const int w_in_end = w_in_start + filter_width * dilate_width;
int in_offset =
((batch * input_channels + c_in) * input_height) * input_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
int weight_offset = 0;
#pragma unroll
for (int h_in = h_in_start; h_in < h_in_end; h_in += dilate_height) {
#pragma unroll
for (int w_in = w_in_start; w_in < w_in_end; w_in += dilate_width) {
if (h_in >= h_start && h_in < h_end && w_in >= w_start && w_in < w_end) {
int offset = in_offset + h_in * input_width + w_in;
T in_data = input_data[offset];
if (fuse_relu_before_conv) {
value += weight[weight_offset] * max(0.0f, in_data);
} else {
value += weight[weight_offset] * in_data;
}
}
weight_offset++;
}
}
int index = batch * output_channels * output_height * output_width +
c_out * output_height * output_width + h_out * output_width +
w_out;
output_data[index] = value;
}
// A Cuda kernel to compute the depthwise convolution forward pass
// in NHWC format.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvNHWC(
ARG_DEFINE_KernelDepthwiseConv) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= (output_channels * batch_size * output_height * output_width))
return;
const int c_out = idx % output_channels;
const int w_out = (idx / output_channels) % output_width;
const int h_out = (idx / output_channels / output_width) % output_height;
const int batch = idx / output_width / output_height / output_channels;
const int c_in = c_out / filter_multiplier;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + filter_height * dilate_height;
const int w_in_end = w_in_start + filter_width * dilate_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
int weight_offset = 0;
#pragma unroll
for (int h_in = h_in_start; h_in < h_in_end; h_in += dilate_height) {
#pragma unroll
for (int w_in = w_in_start; w_in < w_in_end; w_in += dilate_width) {
if (h_in >= h_start && h_in < h_end && w_in >= w_start && w_in < w_end) {
int offset = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
T in_data = input_data[offset];
const T* weight = filter_data + weight_offset * output_channels + c_out;
if (fuse_relu_before_conv) {
value += weight[0] * max(0.0f, in_data);
} else {
value += weight[0] * in_data;
}
}
weight_offset++;
}
}
int index = batch * output_channels * output_height * output_width +
h_out * output_width * output_channels + w_out * output_channels +
c_out;
output_data[index] = value;
}
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvCFilterNCHW(
ARG_DEFINE_KernelDepthwiseConv) {
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const T* weight = filter_data + c_out * c_filter * c_filter;
for (int i = 0; i < c_filter * c_filter; i++) r_weight[i] = weight[i];
for (int w_out = threadIdx.x; w_out < output_width; w_out += blockDim.x) {
for (int h_out = threadIdx.y; h_out < output_height; h_out += blockDim.y) {
const int batch = blockIdx.y;
const int c_out = blockIdx.x;
const int c_in = c_out / filter_multiplier;
T value = 0;
const int h_in_start = -padding_height + h_out * stride_height;
const int w_in_start = -padding_width + w_out * stride_width;
const int h_in_end = h_in_start + c_filter * dilate_height;
const int w_in_end = w_in_start + c_filter * dilate_width;
int in_offset =
((batch * input_channels + c_in) * input_height) * input_width;
const int h_end = h_in_end < input_height ? h_in_end : input_height;
const int w_end = w_in_end < input_width ? w_in_end : input_width;
const int h_start = h_in_start > 0 ? h_in_start : 0;
const int w_start = w_in_start > 0 ? w_in_start : 0;
for (int h_in = h_in_start, h_f = 0; h_f < c_filter;
h_in += dilate_height, h_f++) {
for (int w_in = w_in_start, w_f = 0; w_f < c_filter;
w_in += dilate_width, w_f++) {
if (h_in >= 0 && h_in < input_height && w_in >= 0 &&
w_in < input_width) {
int offset = in_offset + h_in * input_width + w_in;
if (fuse_relu_before_conv) {
value += r_weight[h_f * c_filter + w_f] *
max(0.0f, input_data[offset]);
} else {
value += r_weight[h_f * c_filter + w_f] * input_data[offset];
}
}
}
}
int index =
((batch * gridDim.x + c_out) * output_height + h_out) * output_width +
w_out;
output_data[index] = value;
}
}
}
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvCFilterNHWC(
ARG_DEFINE_KernelDepthwiseConv) {
const int batch = blockIdx.z;
int h_out = blockIdx.x * dilate_height + blockIdx.y;
if (h_out >= output_height) {
return;
}
int in_offset = batch * input_height * input_width * input_channels;
int out_offset =
(batch * output_height + h_out) * output_width * output_channels;
const int h_in_start = -padding_height + h_out * stride_height;
const int wi_size = (output_width + dilate_width - 1) / dilate_width;
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
for (int c_out = threadIdx.x; c_out < output_channels; c_out += blockDim.x) {
for (int i = 0; i < c_filter * c_filter; i++) {
const T* weight = filter_data + i * output_channels + c_out;
r_weight[i] = weight[0];
}
const int c_in = c_out / filter_multiplier;
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int w_out = i_wi * dilate_width + i_dw;
if (w_out >= output_width) {
continue;
}
T value = 0;
const int w_in_start = -padding_width + w_out * stride_width;
for (int h_in = h_in_start, h_f = 0; h_f < c_filter;
h_in += dilate_height, h_f++) {
for (int w_in = w_in_start, w_f = 0; w_f < c_filter;
w_in += dilate_width, w_f++) {
if (h_in >= 0 && h_in < input_height && w_in >= 0 &&
w_in < input_width) {
int offset =
in_offset + (h_in * input_width + w_in) * input_channels + c_in;
if (fuse_relu_before_conv) {
value += r_weight[h_f * c_filter + w_f] *
max(0.0f, input_data[offset]);
} else {
value += r_weight[h_f * c_filter + w_f] * input_data[offset];
}
}
}
}
int index = out_offset + w_out * output_channels + c_out;
output_data[index] = value;
}
}
}
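// Dispatch kernel: c_filter == -1 falls back to the generic NCHW/NHWC kernels, otherwise the
// register-cached c_filter specializations are used; c_filter_multiplier == 0 means the
// multiplier and stride are not fixed at compile time, so the runtime values are used instead.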
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvSp(ARG_DEFINE_KernelDepthwiseConv) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvNCHW<T, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
} else {
KernelDepthwiseConvNHWC<T, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvCFilterNCHW<T, c_filter, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
} else {
KernelDepthwiseConvCFilterNHWC<T, c_filter, fuse_relu_before_conv>(
input_data, filter_data, batch_size, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
output_data);
}
}
}
// CUDA kernel to compute the depthwise convolution backprop w.r.t input.
#define ARG_DEFINE_KernelDepthwiseConvInputGrad \
const T *const input_data, const T *const output_grad_data, \
const T *const filter_data, const int batch_size, \
const int output_channels, const int output_height, \
const int output_width, const int input_channels, \
const int input_height, const int input_width, \
const int filter_multiplier, const int filter_height, \
const int filter_width, const int stride_height, const int stride_width, \
const int padding_height, const int padding_width, \
const int dilate_height, const int dilate_width, \
T *const input_grad_data
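// Input-gradient kernel for NCHW: each block covers one (batch, input channel) pair; every
// thread gathers, for its input pixel, the contributions of all output positions and filter
// taps that touch it. With fuse_relu_before_conv the gradient is zeroed wherever the forward
// input was <= 0.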
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradNCHW(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
for (int w_in = threadIdx.x; w_in < input_width; w_in += blockDim.x) {
for (int h_in = threadIdx.y; h_in < input_height; h_in += blockDim.y) {
const int c_out_start = c_in * filter_multiplier;
int h_out_start =
h_in - (filter_height - 1) * dilate_height + padding_height;
int h_out_end = h_in + padding_height;
int w_out_start =
w_in - (filter_width - 1) * dilate_width + padding_width;
int w_out_end = w_in + padding_width;
T value = 0;
int index =
((batch * gridDim.x + c_in) * input_height + h_in) * input_width +
w_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_out = c_out_start; c_out < c_out_start + filter_multiplier;
c_out++) {
int filter_offset = (c_out + 1) * filter_height * filter_width;
for (int h_out = h_out_start; h_out <= h_out_end;
h_out += dilate_height) {
for (int w_out = w_out_start; w_out <= w_out_end;
w_out += dilate_width) {
filter_offset--;
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_channels + c_out) * output_height +
s_h_out) *
output_width +
s_w_out;
value += output_grad_data[output_grad_offset] *
filter_data[filter_offset];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradNHWC(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int batch = blockIdx.z;
int h_in = blockIdx.x * dilate_height + blockIdx.y;
if (h_in >= input_height) {
return;
}
for (int c_in = threadIdx.x; c_in < input_channels; c_in += blockDim.x) {
for (int w_in = threadIdx.y; w_in < input_width; w_in += blockDim.y) {
int h_out_start =
h_in - (filter_height - 1) * dilate_height + padding_height;
int w_out_start =
w_in - (filter_width - 1) * dilate_width + padding_width;
T value = 0;
int index = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
int weight_offset = filter_height * filter_width;
for (int h_out = h_out_start, h_f = 0; h_f < filter_height;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < filter_width;
w_out += dilate_width, w_f++) {
weight_offset--;
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_height + s_h_out) * output_width + s_w_out) *
output_channels +
c_out;
int filter_offset = weight_offset * output_channels + c_out;
value += output_grad_data[output_grad_offset] *
filter_data[filter_offset];
}
}
}
}
input_grad_data[index] = value;
}
}
}
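// Register-cached input-gradient variant for NCHW: the filter weights are loaded into
// registers in reverse order (the 180-degree rotation used when back-propagating through a
// convolution), so the accumulation below can index them forward.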
template <typename T, int c_filter, int c_filter_multiplier,
bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradCFilterNCHW(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
const int kWeightSize = c_filter * c_filter * c_filter_multiplier + 1;
T r_weight[kWeightSize];
const int batch = blockIdx.y;
const int c_in = blockIdx.x;
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
const T* weight = filter_data + c_out * c_filter * c_filter;
for (int i = 0; i < c_filter * c_filter; i++)
r_weight[i + c_i * c_filter * c_filter] =
weight[c_filter * c_filter - i - 1];
}
for (int w_in = threadIdx.x; w_in < input_width; w_in += blockDim.x) {
for (int h_in = threadIdx.y; h_in < input_height; h_in += blockDim.y) {
int h_out_start = h_in - (c_filter - 1) * dilate_height + padding_height;
int w_out_start = w_in - (c_filter - 1) * dilate_width + padding_width;
T value = 0;
int index =
((batch * gridDim.x + c_in) * input_height + h_in) * input_width +
w_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < filter_multiplier; c_i++) {
int c_out = c_in * filter_multiplier + c_i;
for (int h_out = h_out_start, h_f = 0; h_f < c_filter;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < c_filter;
w_out += dilate_width, w_f++) {
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_channels + c_out) * output_height +
s_h_out) *
output_width +
s_w_out;
value +=
output_grad_data[output_grad_offset] *
r_weight[h_f * c_filter + w_f + c_i * c_filter * c_filter];
}
}
}
}
input_grad_data[index] = value;
}
}
}
template <typename T, int c_filter, int c_filter_multiplier,
bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvInputGradCFilterNHWC(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
int h_in = blockIdx.x * dilate_height + blockIdx.y;
if (h_in >= input_height) {
return;
}
const int kWeightSize = c_filter * c_filter * c_filter_multiplier + 1;
T r_weight[kWeightSize];
const int batch = blockIdx.z;
const int wi_size = (input_width + dilate_width - 1) / dilate_width;
const int h_out_start =
h_in - (c_filter - 1) * dilate_height + padding_height;
for (int c_in = threadIdx.x; c_in < input_channels; c_in += blockDim.x) {
for (int c_i = 0; c_i < c_filter_multiplier; c_i++) {
int c_out = c_in * c_filter_multiplier + c_i;
for (int i = 0; i < c_filter * c_filter; i++)
r_weight[i + c_i * c_filter * c_filter] =
filter_data[(c_filter * c_filter - i - 1) * output_channels +
c_out];
}
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int w_in = i_wi * dilate_width + i_dw;
if (w_in >= input_width) {
continue;
}
int w_out_start = w_in - (c_filter - 1) * dilate_width + padding_width;
T value = 0;
int index = ((batch * input_height + h_in) * input_width + w_in) *
input_channels +
c_in;
if (fuse_relu_before_conv) {
if (input_data[index] <= 0) {
input_grad_data[index] = 0;
continue;
}
}
for (int c_i = 0; c_i < c_filter_multiplier; c_i++) {
int c_out = c_in * c_filter_multiplier + c_i;
for (int h_out = h_out_start, h_f = 0; h_f < c_filter;
h_out += dilate_height, h_f++) {
for (int w_out = w_out_start, w_f = 0; w_f < c_filter;
w_out += dilate_width, w_f++) {
int s_h_out = h_out / stride_height;
int s_w_out = w_out / stride_width;
if (h_out % stride_height == 0 && w_out % stride_width == 0 &&
s_h_out >= 0 && s_h_out < output_height && s_w_out >= 0 &&
s_w_out < output_width) {
int output_grad_offset =
((batch * output_height + s_h_out) * output_width + s_w_out) *
output_channels +
c_out;
value +=
output_grad_data[output_grad_offset] *
r_weight[h_f * c_filter + w_f + c_i * c_filter * c_filter];
}
}
}
}
input_grad_data[index] = value;
}
}
}
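// Dispatch kernel for the input-gradient path, mirroring KernelDepthwiseConvSp: the generic
// kernels handle c_filter == -1, the CFilter variants handle compile-time filter sizes.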
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvInputGradSp(
ARG_DEFINE_KernelDepthwiseConvInputGrad) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter_multiplier == 0 || c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvInputGradNCHW<T, fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, final_filter_multiplier, filter_height,
filter_width, h_stride, w_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
} else {
KernelDepthwiseConvInputGradNHWC<T, fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, final_filter_multiplier, filter_height,
filter_width, h_stride, w_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvInputGradCFilterNCHW<T, c_filter, c_filter_multiplier,
fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, c_filter_multiplier, filter_height,
filter_width, c_stride, c_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
} else {
KernelDepthwiseConvInputGradCFilterNHWC<T, c_filter, c_filter_multiplier,
fuse_relu_before_conv>(
input_data, output_grad_data, filter_data, batch_size,
output_channels, output_height, output_width, input_channels,
input_height, input_width, c_filter_multiplier, filter_height,
filter_width, c_stride, c_stride, padding_height, padding_width,
dilate_height, dilate_width, input_grad_data);
}
}
}
// Cuda kernel to compute the depthwise convolution backprop w.r.t. filter.
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradNCHW(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
T s = 0;
int gbid = ((blockIdx.z * gridDim.y) + blockIdx.y) * gridDim.x + blockIdx.x;
for (int image_w = threadIdx.x; image_w < output_width;
image_w += blockDim.x) {
for (int bid = 0; bid < num; bid++) {
for (int image_h = threadIdx.y; image_h < output_height;
image_h += blockDim.y) {
int kernel_id = blockIdx.z;
int kernel_h = blockIdx.y * dilate_height - padding_height;
int kernel_w = blockIdx.x * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
#define gaid(N, C, H, W) \
((((N)*gridDim.z + (C)) * output_height + (H)) * output_width + (W))
int input_id = ((bid * (gridDim.z / filter_multiplier) +
kernel_id / filter_multiplier) *
input_height +
image_hk) *
input_width +
image_wk;
if (fuse_relu_before_conv) {
s += output_grad_data[gaid(bid, kernel_id, image_h, image_w)] *
max(0.0f, input_data[input_id]);
} else {
s += output_grad_data[gaid(bid, kernel_id, image_h, image_w)] *
input_data[input_id];
}
#undef gaid
}
}
}
T val = BlockReduceSum(s);
platform::CudaAtomicAdd(&filter_grad_data[gbid], val);
}
template <typename T, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradNHWC(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
int bid = blockIdx.z;
int image_h = blockIdx.y;
int kernel_iw = blockIdx.x % filter_width;
int kernel_ih = blockIdx.x / filter_width;
for (int kernel_id = threadIdx.x; kernel_id < output_channels;
kernel_id += blockDim.x) {
T s = 0;
int gbid =
((kernel_id * filter_height) + kernel_ih) * filter_width + kernel_iw;
for (int image_w = threadIdx.y; image_w < output_width;
image_w += blockDim.y) {
int kernel_h = kernel_ih * dilate_height - padding_height;
int kernel_w = kernel_iw * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
#define gaid(N, H, W, C) \
((((N)*output_height + (H)) * output_width + (W)) * output_channels + (C))
int input_id =
((bid * input_height + image_hk) * input_width + image_wk) *
input_channels +
kernel_id / filter_multiplier;
if (fuse_relu_before_conv) {
s += output_grad_data[gaid(bid, image_h, image_w, kernel_id)] *
max(0.0f, input_data[input_id]);
} else {
s += output_grad_data[gaid(bid, image_h, image_w, kernel_id)] *
input_data[input_id];
}
#undef gaid
}
platform::CudaAtomicAdd(&filter_grad_data[gbid], s);
}
}
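// Compile-time filter-size NHWC filter-gradient variant: each thread first accumulates the
// partial sums for all c_filter x c_filter taps of its output channel in registers, then
// issues a single atomic add per tap.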
template <typename T, int c_filter, bool fuse_relu_before_conv>
__device__ __inline__ void KernelDepthwiseConvFilterGradCFilterNHWC(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
const int bid = blockIdx.z;
int image_h = blockIdx.x * dilate_height + blockIdx.y;
if (image_h >= output_height) {
return;
}
const int kWeightSize = c_filter * c_filter;
T r_weight[kWeightSize];
const int wi_size = (output_width + dilate_width - 1) / dilate_width;
for (int kernel_id = threadIdx.x; kernel_id < output_channels;
kernel_id += blockDim.x) {
for (int i = 0; i < c_filter * c_filter; ++i) {
r_weight[i] = 0;
}
for (int i = threadIdx.y; i < wi_size * dilate_width; i += blockDim.y) {
int i_dw = i / wi_size;
int i_wi = i - i_dw * wi_size;
int image_w = i_wi * dilate_width + i_dw;
if (image_w >= output_width) {
continue;
}
for (int kernel_ih = 0; kernel_ih < c_filter; ++kernel_ih) {
for (int kernel_iw = 0; kernel_iw < c_filter; ++kernel_iw) {
int kernel_h = kernel_ih * dilate_height - padding_height;
int kernel_w = kernel_iw * dilate_width - padding_width;
int image_hk = image_h * stride_height + kernel_h;
int image_wk = image_w * stride_width + kernel_w;
if (image_hk < 0 || image_hk >= input_height) continue;
if (image_wk < 0 || image_wk >= input_width) continue;
int input_id =
((bid * input_height + image_hk) * input_width + image_wk) *
input_channels +
kernel_id / filter_multiplier;
int output_id =
((bid * output_height + image_h) * output_width + image_w) *
output_channels +
kernel_id;
T s = 0;
if (fuse_relu_before_conv) {
s = output_grad_data[output_id] * max(0.0f, input_data[input_id]);
} else {
s = output_grad_data[output_id] * input_data[input_id];
}
r_weight[kernel_ih * c_filter + kernel_iw] += s;
}
}
}
for (int i = 0; i < c_filter * c_filter; ++i) {
T* weight = filter_grad_data + i * output_channels + kernel_id;
platform::CudaAtomicAdd(&weight[0], r_weight[i]);
}
}
}
template <typename T, int c_filter_multiplier, int c_stride, int c_filter,
DataLayout data_layout, bool fuse_relu_before_conv>
__global__ void KernelDepthwiseConvFilterGradSp(
const T* output_grad_data, const T* input_data, const int num,
const int output_channels, const int output_height, const int output_width,
const int input_channels, const int input_height, const int input_width,
const int filter_multiplier, const int filter_height,
const int filter_width, const int stride_height, const int stride_width,
const int padding_height, const int padding_width, const int dilate_height,
const int dilate_width, T* filter_grad_data) {
int final_filter_multiplier = filter_multiplier;
int h_stride = stride_height;
int w_stride = stride_width;
if (c_filter_multiplier != 0) {
final_filter_multiplier = c_filter_multiplier;
h_stride = c_stride;
w_stride = c_stride;
}
if (c_filter_multiplier == 0 || c_filter == -1) {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvFilterGradNCHW<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
} else {
KernelDepthwiseConvFilterGradNHWC<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
}
} else {
if (data_layout != DataLayout::kNHWC) {
KernelDepthwiseConvFilterGradNCHW<T, fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
} else {
KernelDepthwiseConvFilterGradCFilterNHWC<T, c_filter,
fuse_relu_before_conv>(
output_grad_data, input_data, num, output_channels, output_height,
output_width, input_channels, input_height, input_width,
final_filter_multiplier, filter_height, filter_width, h_stride,
w_stride, padding_height, padding_width, dilate_height, dilate_width,
filter_grad_data);
}
}
}
/*
 * Tensors are in NCHW format by default; NHWC is also supported via the data_layout argument.
* Ksize, strides, paddings are two elements. These two elements represent
* height and width, respectively.
*/
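// A minimal host-side usage sketch; `ctx`, `input`, `filter` and `output` are hypothetical
// names for an initialized CUDADeviceContext and GPU tensors of matching shapes:
//   math::DepthwiseConvFunctor<platform::CUDADeviceContext, float, false> depthwise_conv;
//   depthwise_conv(ctx, input, filter, /*strides=*/{1, 1}, /*paddings=*/{1, 1},
//                  /*dilations=*/{1, 1}, &output);  // data_layout defaults to kNCHW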
template <class T, bool fuse_relu_before_conv>
class DepthwiseConvFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& filter,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations, framework::Tensor* output,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output->dims()[1]
: output->dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output->dims()[2]
: output->dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output->dims()[3]
: output->dims()[2]);
const int ksize_height = filter.dims()[2];
const int ksize_width = filter.dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* filter_data = filter.data<T>();
T* output_data = output->mutable_data<T>(context.GetPlace());
framework::Tensor filter_hwc;
if (data_layout == DataLayout::kNHWC) {
framework::DDim filter_hwc_dims({filter.dims()[2], filter.dims()[3],
filter.dims()[0], filter.dims()[1]});
filter_hwc.Resize(filter_hwc_dims);
filter_hwc.mutable_data<T>(context.GetPlace());
std::vector<int> perm_axis({2, 3, 0, 1});
math::TransposeNormal<platform::CUDADeviceContext, T> trans;
trans(context, filter, &filter_hwc, perm_axis);
filter_data = filter_hwc.data<T>();
}
int thread = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (output_width > 1024 && output_width <= 2048)
thread = (output_width - 1) / 2 + 1;
else if (output_width > 512 && output_width <= 1024)
thread = output_width;
#ifdef __HIPCC__
thread = std::min(thread, 256);
#endif
blocks = std::min(std::max(thread / output_width, 1), output_height);
threads = dim3(std::min(output_width, thread), blocks, 1);
grid = dim3(output_channels, batch_size, 1);
} else {
#ifdef __HIPCC__
thread = std::min(thread, 256);
#endif
blocks = std::min(
std::max(thread / output_channels, 1),
((output_width + dilate_width - 1) / dilate_width) * dilate_width);
threads = dim3(std::min(output_channels, thread), blocks, 1);
grid = dim3((output_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
}
int filter_multiplier = output_channels / input_channels;
int nums_output =
batch_size * output_channels * output_height * output_width;
#ifdef __HIPCC__
int block_size = 256;
#else
int block_size = 512;
#endif
int grid_size = (nums_output + block_size - 1) / block_size;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (c_filter == -1) { \
threads.x = block_size; \
grid.x = grid_size; \
threads.y = threads.z = grid.y = grid.z = 1; \
} \
if (data_layout != DataLayout::kNHWC) { \
KernelDepthwiseConvSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
input_data, filter_data, batch_size, output_channels, output_height, \
output_width, input_channels, input_height, input_width, \
filter_multiplier, ksize_height, ksize_width, stride_height, \
stride_width, padding_height, padding_width, dilate_height, \
dilate_width, output_data); \
} else { \
KernelDepthwiseConvSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
input_data, filter_data, batch_size, output_channels, output_height, \
output_width, input_channels, input_height, input_width, \
filter_multiplier, ksize_height, ksize_width, stride_height, \
stride_width, padding_height, padding_width, dilate_height, \
dilate_width, output_data); \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
// NOTE(liangdun): 0,0 for other case
// add other case if needed, e.g. check_case(2^n,1)
#undef check_case
}
};
template <typename T, bool fuse_relu_before_conv>
class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& filter,
const framework::Tensor& output_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
framework::Tensor* input_grad,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[1]
: output_grad.dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[2]
: output_grad.dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[3]
: output_grad.dims()[2]);
const int ksize_height = filter.dims()[2];
const int ksize_width = filter.dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* filter_data = filter.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* input_grad_data = input_grad->mutable_data<T>(context.GetPlace());
framework::Tensor filter_hwc;
if (data_layout == DataLayout::kNHWC) {
framework::DDim filter_hwc_dims({filter.dims()[2], filter.dims()[3],
filter.dims()[0], filter.dims()[1]});
filter_hwc.Resize(filter_hwc_dims);
filter_hwc.mutable_data<T>(context.GetPlace());
std::vector<int> perm_axis({2, 3, 0, 1});
math::TransposeNormal<platform::CUDADeviceContext, T> trans;
trans(context, filter, &filter_hwc, perm_axis);
filter_data = filter_hwc.data<T>();
}
int thread = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (input_width > 1024 && input_width <= 2048) {
thread = (input_width - 1) / 2 + 1;
} else if (input_width > 512 && input_width <= 1024) {
thread = input_width;
}
blocks = std::min(std::max(thread / input_width, 1), input_height);
threads = dim3(std::min(input_width, thread), blocks, 1);
grid = dim3(input_channels, batch_size, 1);
} else {
blocks = std::min(
std::max(thread / input_channels, 1),
((input_width + dilate_width - 1) / dilate_width) * dilate_width);
threads = dim3(std::min(input_channels, thread), blocks, 1);
grid = dim3((input_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
}
int filter_multiplier = output_channels / input_channels;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (data_layout != DataLayout::kNHWC) { \
KernelDepthwiseConvInputGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
input_data, output_grad_data, filter_data, batch_size, \
output_channels, output_height, output_width, input_channels, \
input_height, input_width, filter_multiplier, ksize_height, \
ksize_width, stride_height, stride_width, padding_height, \
padding_width, dilate_height, dilate_width, input_grad_data); \
} else { \
KernelDepthwiseConvInputGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
input_data, output_grad_data, filter_data, batch_size, \
output_channels, output_height, output_width, input_channels, \
input_height, input_width, filter_multiplier, ksize_height, \
ksize_width, stride_height, stride_width, padding_height, \
padding_width, dilate_height, dilate_width, input_grad_data); \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
// NOTE(liangdun): 0,0 for other case
// add other case if needed, e.g. check_case(2^n,1)
#undef check_case
}
};
template <typename T, bool fuse_relu_before_conv>
class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext, T,
fuse_relu_before_conv> {
public:
void operator()(const platform::CUDADeviceContext& context,
const framework::Tensor& input,
const framework::Tensor& output_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings,
const std::vector<int>& dilations,
framework::Tensor* filter_grad,
const DataLayout data_layout = DataLayout::kNCHW) {
const int batch_size = input.dims()[0];
const int input_channels =
(data_layout != DataLayout::kNHWC ? input.dims()[1] : input.dims()[3]);
const int input_height =
(data_layout != DataLayout::kNHWC ? input.dims()[2] : input.dims()[1]);
const int input_width =
(data_layout != DataLayout::kNHWC ? input.dims()[3] : input.dims()[2]);
const int output_channels =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[1]
: output_grad.dims()[3]);
const int output_height =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[2]
: output_grad.dims()[1]);
const int output_width =
(data_layout != DataLayout::kNHWC ? output_grad.dims()[3]
: output_grad.dims()[2]);
const int ksize_height = filter_grad->dims()[2];
const int ksize_width = filter_grad->dims()[3];
const int stride_height = strides[0];
const int stride_width = strides[1];
const int padding_height = paddings[0];
const int padding_width = paddings[1];
const int dilate_height = dilations[0];
const int dilate_width = dilations[1];
const T* input_data = input.data<T>();
const T* output_grad_data = output_grad.data<T>();
T* filter_grad_data = filter_grad->mutable_data<T>(context.GetPlace());
int block_size = 512;
int blocks;
dim3 threads;
dim3 grid;
if (data_layout != DataLayout::kNHWC) {
if (output_width > 1024 && output_width <= 2048) {
block_size = (output_width - 1) / 2 + 1;
} else if (output_width > 512 && output_width <= 1024) {
block_size = output_width;
}
blocks = std::min(std::max(block_size / output_width, 1), output_height);
grid = dim3(ksize_width, ksize_height, output_channels);
threads = dim3(std::min(output_width, block_size), blocks, 1);
} else {
blocks = std::min(
std::max(block_size / output_channels, 1),
((output_width + dilate_width - 1) / dilate_width) * dilate_width);
grid = dim3((output_height + dilate_height - 1) / dilate_height,
dilate_height, batch_size);
threads = dim3(std::min(output_channels, block_size), blocks, 1);
}
int filter_multiplier = output_channels / input_channels;
#define check_case(c_filter_multiplier, c_stride, c_filter) \
if (c_filter_multiplier == 0 || \
filter_multiplier == c_filter_multiplier && \
stride_height == stride_width && stride_height == c_stride && \
(ksize_height == ksize_width && ksize_height == c_filter || \
c_filter == -1)) { \
if (data_layout != DataLayout::kNHWC) { \
KernelDepthwiseConvFilterGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNCHW, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
output_grad_data, input_data, batch_size, output_channels, \
output_height, output_width, input_channels, input_height, \
input_width, filter_multiplier, ksize_height, ksize_width, \
stride_height, stride_width, padding_height, padding_width, \
dilate_height, dilate_width, filter_grad_data); \
} else { \
framework::Tensor filter_grad_hwc; \
if (c_filter != -1) { \
framework::DDim filter_grad_hwc_dims( \
{filter_grad->dims()[2], filter_grad->dims()[3], \
filter_grad->dims()[0], filter_grad->dims()[1]}); \
filter_grad_hwc.Resize(filter_grad_hwc_dims); \
filter_grad_hwc.mutable_data<T>(context.GetPlace()); \
math::SetConstant<platform::CUDADeviceContext, T> set_zero; \
set_zero(context, &filter_grad_hwc, static_cast<T>(0)); \
filter_grad_data = filter_grad_hwc.data<T>(); \
} else { \
block_size = 512; \
if (output_channels > 1024 && output_channels <= 2048) { \
block_size = (output_channels - 1) / 2 + 1; \
} else if (output_channels > 512 && output_channels <= 1024) { \
block_size = output_channels; \
} \
blocks = \
std::min(std::max(block_size / output_channels, 1), output_width); \
grid = dim3(ksize_width * ksize_height, output_height, batch_size); \
threads = dim3(std::min(output_channels, block_size), blocks, 1); \
} \
KernelDepthwiseConvFilterGradSp< \
T, c_filter_multiplier, c_stride, c_filter, DataLayout::kNHWC, \
fuse_relu_before_conv><<<grid, threads, 0, context.stream()>>>( \
output_grad_data, input_data, batch_size, output_channels, \
output_height, output_width, input_channels, input_height, \
input_width, filter_multiplier, ksize_height, ksize_width, \
stride_height, stride_width, padding_height, padding_width, \
dilate_height, dilate_width, filter_grad_data); \
if (c_filter != -1) { \
std::vector<int> perm_axis({2, 3, 0, 1}); \
math::TransposeNormal<platform::CUDADeviceContext, T> trans; \
trans(context, filter_grad_hwc, filter_grad, perm_axis); \
} \
} \
return; \
}
check_case(1, 1, 3);
check_case(1, 1, 5);
check_case(1, 1, -1);
check_case(1, 2, 3);
check_case(1, 2, 5);
check_case(1, 2, -1);
check_case(2, 1, 3);
check_case(2, 1, 5);
check_case(2, 1, -1);
check_case(2, 2, 3);
check_case(2, 2, 5);
check_case(2, 2, -1);
check_case(0, 0, -1);
#undef check_case
}
};
template class DepthwiseConvFunctor<platform::CUDADeviceContext, float, false>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, double, false>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float,
false>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext,
double, false>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
float, false>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
double, false>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, float, true>;
template class DepthwiseConvFunctor<platform::CUDADeviceContext, double, true>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext, float,
true>;
template class DepthwiseConvInputGradFunctor<platform::CUDADeviceContext,
double, true>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
float, true>;
template class DepthwiseConvFilterGradFunctor<platform::CUDADeviceContext,
double, true>;
} // namespace math
} // namespace operators
} // namespace paddle
|
11dcf5442728171844740be33b925760e58484c4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation on the CPU, which limits accumulated floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
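/* kernel4: each thread adds one element from each half of the input while loading into shared
   memory; the block then performs a shared-memory tree reduction and unrolls the last warp
   through a volatile pointer. Thread 0 writes the block's partial sum to g_odata[bid], and the
   host relaunches the kernel on the partial sums until a single value remains. */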
__global__ void
kernel4(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n/2) {
scratch[threadIdx.x] = g_idata[i] + g_idata[i + (n/2)];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = (blockDim.x>>1); s >32; s = s>>1) {
if(threadIdx.x<s) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x<32){
volatile dtype* scratches = scratch;
if(n>64){
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 32];
}
if(n>32){
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 16];
}
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 8];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 4];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 2];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_4, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 4;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (hipMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (hipMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (hipMemcpy (d_idata, h_idata, N * sizeof (dtype),
hipMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
hipDeviceSynchronize ();
stopwatch_start (timer);
/* execute kernel */
hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
hipLaunchKernelGGL(( kernel4) , dim3(gb), dim3(tb), 0, 0, d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
hipDeviceSynchronize ();
t_kernel_4 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4);
double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (hipMemcpy (&h_odata, d_odata, sizeof (dtype),
hipMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
| 11dcf5442728171844740be33b925760e58484c4.cu | #include <stdlib.h>
#include <stdio.h>
#include "timer.h"
#include "cuda_utils.h"
typedef float dtype;
#define N_ (8 * 1024 * 1024)
#define MAX_THREADS 256
#define MAX_BLOCKS 64
#define MIN(x,y) ((x < y) ? x : y)
/* return the next power of 2 number that is larger than x */
unsigned int nextPow2( unsigned int x ) {
--x;
x |= x >> 1;
x |= x >> 2;
x |= x >> 4;
x |= x >> 8;
x |= x >> 16;
return ++x;
}
/* find out # of threads and # thread blocks for a particular kernel */
void getNumBlocksAndThreads(int whichKernel, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
if (whichKernel < 3)
{
/* 1 thread per element */
threads = (n < maxThreads) ? nextPow2(n) : maxThreads;
blocks = (n + threads - 1) / threads;
}
else
{
/* 1 thread per 2 elements */
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
}
/* limit the total number of threads */
if (whichKernel == 5)
blocks = MIN(maxBlocks, blocks);
}
/* Kahan (compensated) summation on the CPU, which limits accumulated floating point error */
dtype reduce_cpu(dtype *data, int n) {
dtype sum = data[0];
dtype c = (dtype)0.0;
for (int i = 1; i < n; i++)
{
dtype y = data[i] - c;
dtype t = sum + y;
c = (t - sum) - y;
sum = t;
}
return sum;
}
__global__ void
kernel4(dtype *g_idata, dtype *g_odata, unsigned int n)
{
__shared__ dtype scratch[MAX_THREADS];
unsigned int bid = gridDim.x * blockIdx.y + blockIdx.x;
unsigned int i = bid * blockDim.x + threadIdx.x;
if(i < n/2) {
scratch[threadIdx.x] = g_idata[i] + g_idata[i + (n/2)];
} else {
scratch[threadIdx.x] = 0;
}
__syncthreads ();
for(unsigned int s = (blockDim.x>>1); s >32; s = s>>1) {
if(threadIdx.x<s) {
scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + s];
}
__syncthreads();
}
if(threadIdx.x<32){
volatile dtype* scratches = scratch;
if(n>64){
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 32];
}
if(n>32){
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 16];
}
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 8];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 4];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 2];
scratches[threadIdx.x] = scratches[threadIdx.x] + scratches[threadIdx.x + 1];
}
if(threadIdx.x == 0) {
g_odata[bid] = scratch[0];
}
}
int
main(int argc, char** argv)
{
int i;
/* data structure */
dtype *h_idata, h_odata, h_cpu;
dtype *d_idata, *d_odata;
/* timer */
struct stopwatch_t* timer = NULL;
long double t_kernel_4, t_cpu;
/* which kernel are we running */
int whichKernel;
/* number of threads and thread blocks */
int threads, blocks;
int N;
if(argc > 1) {
N = atoi (argv[1]);
printf("N: %d\n", N);
} else {
N = N_;
printf("N: %d\n", N);
}
/* naive kernel */
whichKernel = 4;
getNumBlocksAndThreads (whichKernel, N, MAX_BLOCKS, MAX_THREADS,
blocks, threads);
/* initialize timer */
stopwatch_init ();
timer = stopwatch_create ();
/* allocate memory */
h_idata = (dtype*) malloc (N * sizeof (dtype));
CUDA_CHECK_ERROR (cudaMalloc (&d_idata, N * sizeof (dtype)));
CUDA_CHECK_ERROR (cudaMalloc (&d_odata, blocks * sizeof (dtype)));
/* Initialize array */
srand48(time(NULL));
for(i = 0; i < N; i++) {
h_idata[i] = drand48() / 100000;
}
CUDA_CHECK_ERROR (cudaMemcpy (d_idata, h_idata, N * sizeof (dtype),
cudaMemcpyHostToDevice));
/* ================================================== */
/* GPU kernel */
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
/* warm up */
kernel4 <<<gb, tb>>> (d_idata, d_odata, N);
cudaThreadSynchronize ();
stopwatch_start (timer);
/* execute kernel */
kernel4 <<<gb, tb>>> (d_idata, d_odata, N);
int s = blocks;
while(s > 1) {
threads = 0;
blocks = 0;
getNumBlocksAndThreads (whichKernel, s, MAX_BLOCKS, MAX_THREADS, blocks, threads);
dim3 gb(blocks, 1, 1);
dim3 tb(threads, 1, 1);
kernel4 <<<gb, tb>>> (d_odata, d_odata, s);
s = (s + threads * 2 - 1) / (threads * 2);
}
cudaThreadSynchronize ();
t_kernel_4 = stopwatch_stop (timer);
fprintf (stdout, "Time to execute unrolled GPU reduction kernel: %Lg secs\n", t_kernel_4);
double bw = (N * sizeof(dtype)) / (t_kernel_4 * 1e9);
fprintf (stdout, "Effective bandwidth: %.2lf GB/s\n", bw);
/* copy result back from GPU */
CUDA_CHECK_ERROR (cudaMemcpy (&h_odata, d_odata, sizeof (dtype),
cudaMemcpyDeviceToHost));
/* ================================================== */
/* ================================================== */
/* CPU kernel */
stopwatch_start (timer);
h_cpu = reduce_cpu (h_idata, N);
t_cpu = stopwatch_stop (timer);
fprintf (stdout, "Time to execute naive CPU reduction: %Lg secs\n",
t_cpu);
/* ================================================== */
if(abs (h_odata - h_cpu) > 1e-5) {
fprintf(stderr, "FAILURE: GPU: %f CPU: %f\n", h_odata, h_cpu);
} else {
printf("SUCCESS: GPU: %f CPU: %f\n", h_odata, h_cpu);
}
return 0;
}
|
010572193496a1d26b019d429cf22606402dcb3e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2020 Marcel Wagenländer
#include <math.h>
#include "elesq.h"
__global__ void invsqrt(float *x, float epsilon, int num_elements) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_elements) x[idx] = 1 / (sqrtf(x[idx]) + epsilon);
}
void inverse_sqrt(float *x, float epsilon, int num_elements) {
int num_threads = 1024;
int num_blocks = ceil((float) num_elements / (float) num_threads);
hipLaunchKernelGGL(( invsqrt), dim3(num_blocks), dim3(num_threads), 0, 0, x, epsilon, num_elements);
}
| 010572193496a1d26b019d429cf22606402dcb3e.cu | // Copyright 2020 Marcel Wagenländer
#include <math.h>
#include "elesq.h"
__global__ void invsqrt(float *x, float epsilon, int num_elements) {
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < num_elements) x[idx] = 1 / (sqrtf(x[idx]) + epsilon);
}
void inverse_sqrt(float *x, float epsilon, int num_elements) {
int num_threads = 1024;
int num_blocks = ceil((float) num_elements / (float) num_threads);
invsqrt<<<num_blocks, num_threads>>>(x, epsilon, num_elements);
}
|
9d7e0187b4bc78c44b07f666e0a78b63192ef956.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
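// SAXPY kernel: y[i] = a * x[i] + y[i], one thread per element.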
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
hipMalloc(&d_x, N*sizeof(float));
hipMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
hipMemcpy(d_x, x, N*sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_y, y, N*sizeof(float), hipMemcpyHostToDevice);
// Perform SAXPY on 1M elements
hipLaunchKernelGGL(( saxpy), dim3((N+255)/256), dim3(256), 0, 0, N, 2.0f, d_x, d_y);
hipMemcpy(y, d_y, N*sizeof(float), hipMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
hipFree(d_x);
hipFree(d_y);
free(x);
free(y);
hipError_t e=hipGetLastError();
if(e!=hipSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,hipGetErrorString(e));
exit(0);
}
}
| 9d7e0187b4bc78c44b07f666e0a78b63192ef956.cu | #include <stdio.h>
__global__
void saxpy(int n, float a, float *x, float *y)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i < n) y[i] = a*x[i] + y[i];
}
int main(void)
{
int N = 1<<20;
float *x, *y, *d_x, *d_y;
x = (float*)malloc(N*sizeof(float));
y = (float*)malloc(N*sizeof(float));
cudaMalloc(&d_x, N*sizeof(float));
cudaMalloc(&d_y, N*sizeof(float));
for (int i = 0; i < N; i++) {
x[i] = 1.0f;
y[i] = 2.0f;
}
cudaMemcpy(d_x, x, N*sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_y, y, N*sizeof(float), cudaMemcpyHostToDevice);
// Perform SAXPY on 1M elements
saxpy<<<(N+255)/256, 256>>>(N, 2.0f, d_x, d_y);
cudaMemcpy(y, d_y, N*sizeof(float), cudaMemcpyDeviceToHost);
float maxError = 0.0f;
for (int i = 0; i < N; i++)
maxError = max(maxError, abs(y[i]-4.0f));
printf("Max error: %f\n", maxError);
cudaFree(d_x);
cudaFree(d_y);
free(x);
free(y);
cudaError_t e=cudaGetLastError();
if(e!=cudaSuccess) {
printf("Cuda failure %s:%d: '%s'\n",__FILE__,__LINE__,cudaGetErrorString(e));
exit(0);
}
}
|
53d7a0ce1567c6541628c71a102d5699598f5421.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define THREADS_PER_BLOCK 1024
#define MAX_NUMBER_BLOCKS 2496
long int factorial(int x);
long int nCr(int n, int r);
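// counting: one core (block) per candidate item set; its threads scan disjoint subsets of the
// transactions, the per-thread counts are reduced in shared memory, and thread 0 writes the
// support count into the last column of the frequency table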
__global__ void counting(int * fTable, char * tTable, int row, int col, int nCr, int cardinality){
__shared__ int cache[THREADS_PER_BLOCK]; //cache memory that is shared by all the threads within a block
int bIndex = blockIdx.x; //the index value of the core
int cacheIndex = threadIdx.x; //each thread within a core has a corresponding cache index where it stores its values
//enter a block loop where the core index must remain lower than the number of item sets present in the frequency table
//at the end of each iteration the core index is increased by the number of cores being used and the loop runs again if possible
for(int h = bIndex; h < nCr; h+= gridDim.x){
int tIndex = threadIdx.x; //the index value of the individual thread
int sum = 0; //keeps track of how many times an item set has been found
int found; //a boolean value that indicates whether an item set is present within a transaction; either 0 or 1
//enter a thread loop where i represents which transaction is being scanned. Each thread within a core scans a
// different transaction; the loop is necessary since there aren't enough threads for every transaction. Whenever
// a scan is done i is incremented by the number of threads per block
for(int i = tIndex; i < row; i+= blockDim.x){
found = 1;
//enter a loop where j represents the specific item within an item set; the iterations within the for loop
// is dependent on the cardinality of the item sets
for(int j = 0; j < cardinality; j++){
//if an item indicated in the frequency table is not found in the transaction found is set to 0; i.e. false
if(tTable[i * col + (fTable[bIndex * (cardinality + 1) + j])] != '1'){
found = 0;
}
}
//if found equals 1 then the sum variable is incremented by 1
if(found == 1){
sum++;
}
}
//once any given thread exits the thread loop it stores its sum value at its corresponding cache index
cache[cacheIndex] = sum;
//the threads are synced before the overall sum is calculated to ensure all threads have finished counting;
__syncthreads();
//the cache is then reduced to obtain the total sum for the item set; every iteration adds two cache locations
//together until the final sum is stored at cache[0]
int k = THREADS_PER_BLOCK/2;
while(k != 0){
if(cacheIndex < k){
cache[cacheIndex] += cache[cacheIndex + k];
}
__syncthreads();
k /= 2;
}
//thread 0 takes the overall count of the item set handled by this core and enters it into the
//corresponding count column within the frequency table
if(cacheIndex == 0){
fTable[bIndex * (cardinality + 1) + cardinality] = cache[0];
}
__syncthreads();
//the core index value is incremented by the number of cores being used
bIndex += gridDim.x;
}
}
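// validSets: one thread per entry of the frequency table; any count column value that falls
// below the minimum support mSupport is zeroed so that item set drops out of the next pass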
__global__ void validSets(int * fTable, int cardinality, int nCr, int mSupport){
int tIndex = blockIdx.x * blockDim.x + threadIdx.x;
if((tIndex < (cardinality + 1) * nCr) && (tIndex % (cardinality + 1) == cardinality)){
if(fTable[tIndex] < mSupport){
fTable[tIndex] = 0;
}
}
}
int main() {
//Variable declarations
FILE *fPointer;
char singleLine[100];
int max = 0; //Contains the largest item ID that occurs in the given database
int size = 0; //Contains the number of lines in the given database
int cardinality = 1; //Contains the initial cardinality of the item sets
int temp;
int i = 0;
int j, k, num, count;
int mSupport = 8000; //Contains the support count; set to approx 10% of all transactions
char val;
int numBlocks = 0;
//While loop that traverses the database and counts the number of transactions
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%c", &val);
while(!feof(fPointer)){
if(val == '\n'){
size++;
}
fscanf(fPointer, "%c", &val);
}
fclose(fPointer);
printf("\nNumber of Transcations: %d\n", size);
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%d", &temp);
printf("ID number of first item: %d\n", temp);
//Traverses through each transaction in order to find the max value.
while(!feof(fPointer)){
fscanf(fPointer, "%d", &temp);
if(max < temp){
max = temp;
}
}
fclose(fPointer);
printf("Largest ID number found: %d\n", max);
printf("\nSH: initializing transaction array\n");
//Creation of table
char *cTable = (char*)malloc(sizeof(char) * (max + 1) * size); //Allocates an array of characters for each transaction
for(i=0; i < (max+1)*size; i++) {
//Initialize all values to '\0'.
cTable[i] = '\0';
}
printf("SH: initialization of transaction array COMPLETE\n");
printf("\nSH: populating transaction array\n");
char line[400];
char *cNum;
fPointer = fopen("retail.dat", "r");
for(i = 0; i < size; i++){
fgets(line, 400, fPointer);
cNum = strtok(line, " \n");
while(cNum != NULL){
num = atoi(cNum);
cTable[i * (max + 1) + num] = '1';
cNum = strtok(NULL, " \n");
}
}
printf("SH: populating transaction array COMPLETE\n");
//Creating copy of transaction table in the video card memory
char* gpuT;
hipMalloc(&gpuT, size * (max + 1) * sizeof(char));
hipMemcpy(gpuT, cTable, (size * (max + 1) * sizeof(char)), hipMemcpyHostToDevice);
printf("\nSH: initializing cardinality '1' sets\n");
//Creates a frequency table of item sets with a Cardinality of 1; where the array index represents the item
//number. All the items have their counts initially set to zero
int * fTable = (int *)malloc((max + 1) * (cardinality + 1) * sizeof(int));
for(i = 0; i < max + 1; i++){
fTable[i * (cardinality + 1)] = i;
fTable[(i * (cardinality + 1)) + cardinality] = 0;
}
int* gpuF;
hipMalloc(&gpuF, (max + 1) * (cardinality + 1) * sizeof(int));
hipMemcpy(gpuF, fTable, (max + 1) * (cardinality + 1) * sizeof(int), hipMemcpyHostToDevice);
printf("SH: initialization of cardinality '1' sets COMPLETE\n");
printf("\nSH: counting of cardinality '1' sets\n");
//setting the number of cores to be used by the gpu
numBlocks = (max + 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
hipLaunchKernelGGL(( counting), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, gpuF, gpuT, size, (max + 1), (max + 1), cardinality);
printf("SH: counting of cardinality '1' sets COMPLETE\n");
printf("\nSH: removing item sets whose counts are below the support threshold\n");
//setting the number of cores to be used by the gpu
numBlocks = (max + 1) * (cardinality + 1)/ THREADS_PER_BLOCK + 1;
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
hipLaunchKernelGGL(( validSets), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, gpuF, cardinality, max + 1, mSupport);
printf("SH: removal of item sets COMPLETE\n");
hipMemcpy(fTable, gpuF, ((max + 1) * (cardinality + 1) * sizeof(int)), hipMemcpyDeviceToHost);
//invalidating elements that are below the support count and counting the remaining eligible elements
count = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
count++;
}
}
printf("\nRemaining items sets: %d\n", count);
//creating new table consisting of only valid items
int iTable[count];
j = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
iTable[j] = fTable[i * (cardinality + 1)];
j++;
}
}
//creating a table to hold the currently valid items and a variable for their count
int * vTable = iTable;
int lastCount = count;
while(count > 1){
cardinality++;
printf("\nSH: initializating new cardinality '%d' sets\n", cardinality);
//temporary array that will hold the new item sets
int temp[nCr(count, cardinality) * (cardinality + 1)];
printf("SH: initialization of new cardinality '%d' sets COMPLETE\n", cardinality);
printf("\nSH: initializating old cardinality '%d' sets\n", cardinality - 1);
//array of previous item sets
int oldSets[nCr(lastCount, cardinality - 1) * cardinality];
//array that holds one old item set for insertion into the table
int oldEntry[cardinality - 1];
printf("SH: initialization of old cardinality '%d' sets COMPLETE\n", cardinality - 1);
printf("\nSH: populating old cardinality '%d' sets\n", cardinality - 1);
//function populates old item set
k = 0;
if(cardinality - 1 <= lastCount){
for(i = 0; (oldEntry[i] = i) < cardinality - 2; i++);
for(i = 0; i < cardinality - 1; i++){
oldSets[(k * cardinality) + i] = vTable[oldEntry[i]];
}
k++;
for(;;){
for( i = cardinality - 2; i >= 0 && oldEntry[i] == (lastCount - (cardinality - 1) + i); i--);
if(i < 0){
break;
}
else{
oldEntry[i]++;
for(++i; i < cardinality - 1; i++){
oldEntry[i] = oldEntry[i - 1] + 1;
}
for(j = 0; j < cardinality - 1; j++){
oldSets[(k * cardinality) + j] = vTable[oldEntry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
oldSets[(i * cardinality) + cardinality - 1] = 0;
}
printf("SH: populating of old cardinality '%d' sets COMPLETE\n", cardinality - 1);
//array that will hold the information for a single item set before it is added to the
//array of all item sets
int entry[cardinality];
printf("\nSH: populating cardinality '%d' sets\n", cardinality);
//function populates new item set
k = 0;
if(cardinality <= count){
for(i = 0; (entry[i] = i) < cardinality - 1; i++);
for(i = 0; i < cardinality; i++){
temp[(k*(cardinality + 1)) + i] = vTable[entry[i]];
}
k++;
for(;;){
for( i = cardinality - 1; i >= 0 && entry[i] == (count - cardinality + i); i--);
if(i < 0){
break;
}
else{
entry[i]++;
for(++i; i < cardinality; i++){
entry[i] = entry[i - 1] + 1;
}
for(j = 0; j < cardinality; j++){
temp[(k*(cardinality + 1)) + j] = vTable[entry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(count, cardinality); i++){
temp[(i*(cardinality + 1)) + cardinality ] = 0;
}
printf("SH: populating of cardinality '%d' sets COMPLETE\n", cardinality);
printf("\nSH: counting cardinality '%d' sets\n", cardinality);
//counting the number of instances of the item sets among the transactions
char * gpuV;
int * gpuSet;
hipMalloc(&gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality));
hipMemcpy(gpuSet, temp, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), hipMemcpyHostToDevice);
numBlocks = nCr(count, cardinality);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
hipLaunchKernelGGL(( counting), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, gpuSet, gpuT, size, max + 1, nCr(count, cardinality), cardinality);
hipMemcpy(temp, gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), hipMemcpyDeviceToHost);
hipFree(gpuSet);
printf("SH: counting of cardinality '%d' sets COMPLETE\n\n", cardinality);
printf("\nSH: counting old cardinality '%d' sets\n", cardinality - 1);
//counting the number of instances of the item sets among the transactions
hipMalloc(&gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1));
hipMemcpy(gpuSet, oldSets, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), hipMemcpyHostToDevice);
numBlocks = nCr(lastCount, cardinality - 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
hipLaunchKernelGGL(( counting), dim3(numBlocks), dim3(THREADS_PER_BLOCK), 0, 0, gpuSet, gpuT, size, max + 1, nCr(lastCount, cardinality - 1), cardinality - 1);
hipMemcpy(oldSets, gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), hipMemcpyDeviceToHost);
hipFree(gpuSet);
printf("SH: counting of old cardinality '%d' sets COMPLETE\n\n", cardinality - 1);
for(i = 0; i <= cardinality; i++){
if(i == cardinality){
printf("Count\n");
}
else{
printf("Item '%d'\t", (i+1));
}
}
for(i = 0; i < nCr(count, cardinality); i ++){
for(j = 0; j <= cardinality; j++){
printf("%d\t\t", temp[(i*(cardinality + 1))+j]);
}
printf("\n");
}
printf("\nSH: removing item sets whose counts are below the support threshold\n");
//invalidating elements that are below the support count and counting the remaining eligible elements
int tCount = count;
lastCount = tCount;
count = 0;
for(i = 0; i < nCr(tCount, cardinality); i++){
if (temp[(i*(cardinality + 1)) + cardinality] < mSupport){
temp[(i * (cardinality + 1)) + cardinality] = 0;
}
else{
count++;
}
}
printf("SH: removal of item sets COMPLETE\n");
printf("\nRemaining items sets: %d\n", count);
//set Table of valid items
char valid[max + 1];
for(i = 0; i <= max; i++){
valid[i] = '\0';
}
for(i = 0; i < nCr(tCount, cardinality); i++){
for(j = 0; j < cardinality; j++){
if(temp[(i * (cardinality + 1)) + cardinality] > 0){
valid[temp[(i * (cardinality + 1)) + j]] = '1';
}
}
}
//creating new table consisting of only valid items
int rTable[count];
count = 0;
j = 0;
for(i = 0; i <= max; i++){
if (valid[i] == '1'){
rTable[j] = i;
j++;
count++;
}
}
vTable = rTable;
if(count == 0){
printf("\n----Most Frequent Item Sets----\n\n");
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
printf("Set: {");
}
for(j = 0; j < cardinality; j++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
if(j == cardinality - 1){
printf("}\t\tCount: %d\n", oldSets[(i * cardinality) + j]);
}
else{
printf("'%d'", oldSets[(i * cardinality) + j]);
}
}
}
}
printf("\n");
}
}
}
//factorial function
long int factorial(int x){
int count = x;
while (count > 1){
x = x * (count - 1);
count--;
}
if(x == 0){
x = 1;
}
return x;
}
//combinatorics function
long int nCr(int n, int r){
int y;
int z;
int w = n - 1;
int init = n;
int x;
if(r > (n-r)){
y = r;
}
else{
y = (n-r);
}
z = n - y;
while(z > 1){
n = n * w;
w--;
z--;
}
if( r > (init - r)){
x = n/factorial(init - r);
}
else{
x = n/factorial(r);
}
return x;
}
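/* Worked example for the two helpers above (an illustrative sketch, not part of
the original program; the APRIORI_SELFTEST guard is hypothetical): nCr builds the
falling factorial of length min(r, n-r) and divides it by that factorial, so
nCr(4, 2) = 4*3 / 2! = 6 and nCr(6, 3) = 6*5*4 / 3! = 20. Both helpers accumulate
in plain int, so they are only reliable for the small n and r used in this miner. */
#ifdef APRIORI_SELFTEST
#include <assert.h>
void nCr_selftest(void){
assert(factorial(4) == 24); //4! = 24
assert(nCr(4, 2) == 6);
assert(nCr(6, 3) == 20);
assert(nCr(5, 1) == 5);
}
#endif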
| 53d7a0ce1567c6541628c71a102d5699598f5421.cu | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#define THREADS_PER_BLOCK 1024
#define MAX_NUMBER_BLOCKS 2496
long int factorial(int x);
long int nCr(int n, int r);
__global__ void counting(int * fTable, char * tTable, int row, int col, int nCr, int cardinality){
__shared__ int cache[THREADS_PER_BLOCK]; //cache memory that is shared by all the threads within a block
int bIndex = blockIdx.x; //the index value of the core
int cacheIndex = threadIdx.x; //each thread within a core has a corresponding cache index where it stores its values
//enter a block loop where the core index must remain lower than the number of item sets present in the frequency table
//at the end of each iteration the core index is increased by the amount of cores being used and loops again if possible
for(int h = bIndex; h < nCr; h+= gridDim.x){
int tIndex = threadIdx.x; //the index value of the individual thread
int sum = 0; //keeps track of how many times an item set has been found
int found; //a boolean value that indicates whether an item set is present within a transaction; either 0 or 1
//enter a thread loop where i represents which transaction being scanned. Each thread within a core scans a
// different transaction; the loop is necessary since there aren't enough threads for each transaction. Whenever
// a scan is done i is incremented by the number of threads per block
for(int i = tIndex; i < row; i+= blockDim.x){
found = 1;
//enter a loop where j represents the specific item within an item set; the iterations within the for loop
// is dependent on the cardinality of the item sets
for(int j = 0; j < cardinality; j++){
//if an item indicated in the frequency table is not found in the transaction found is set to 0; i.e. false
if(tTable[i * col + (fTable[bIndex * (cardinality + 1) + j])] != '1'){
found = 0;
}
}
//if found equals 1 then the sum variable is incremented by 1
if(found == 1){
sum++;
}
}
//once any given thread exits the thread loop it stores its sum value to its corresponding cache index
cache[cacheIndex] = sum;
//the threads are synced before the overall sum is calculated to ensure all threads have finished counting;
__syncthreads();
//the cache is then reduced to obtain the total sum for any given item set; every iteration adds two cache locations
//together until the sum is stored at cache[0]
int k = THREADS_PER_BLOCK/2;
while(k != 0){
if(cacheIndex < k){
cache[cacheIndex] += cache[cacheIndex + k];
}
__syncthreads();
k /= 2;
}
//takes the overall count of the item set for the core index that is monitoring this specific item set and enters it into the
//corresponding count column within the frequency table
if(cacheIndex == 0){
fTable[bIndex * (cardinality + 1) + cardinality] = cache[0];
}
__syncthreads();
//the core index value is incremented by the number of cores being used
bIndex += gridDim.x;
}
}
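//A minimal host-side reference for the 'counting' kernel above (an illustrative
//sketch, not part of the original program): it walks the same row-major fTable
//and tTable layouts sequentially, which makes it handy for spot-checking the
//GPU support counts on small inputs.
void counting_cpu(int * fTable, char * tTable, int row, int col, int numSets, int cardinality){
int sum, found;
for(int s = 0; s < numSets; s++){
sum = 0;
for(int t = 0; t < row; t++){
found = 1;
for(int j = 0; j < cardinality; j++){
//item j of set s must be marked '1' in transaction t
if(tTable[t * col + fTable[s * (cardinality + 1) + j]] != '1'){
found = 0;
}
}
if(found == 1){
sum++;
}
}
//store the support count in the last column of the set's row
fTable[s * (cardinality + 1) + cardinality] = sum;
}
}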
__global__ void validSets(int * fTable, int cardinality, int nCr, int mSupport){
int tIndex = blockIdx.x * blockDim.x + threadIdx.x;
if((tIndex < (cardinality + 1) * nCr) && (tIndex % (cardinality + 1) == cardinality)){
if(fTable[tIndex] < mSupport){
fTable[tIndex] = 0;
}
}
}
int main() {
//Variable declarations
FILE *fPointer;
char singleLine[100];
int max = 0; //Contains the largest item ID occurring in the given database
int size = 0; //Contains the number of lines in the given database
int cardinality = 1; //Contains the initial cardinality of the item sets
int temp;
int i = 0;
int j, k, num, count;
int mSupport = 8000; //Contains the support count; set to approx 10% of all transactions
char val;
int numBlocks = 0;
//While loop that traverses through the database and returns the number of transactions
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%c", &val);
while(!feof(fPointer)){
if(val == '\n'){
size++;
}
fscanf(fPointer, "%c", &val);
}
fclose(fPointer);
printf("\nNumber of Transcations: %d\n", size);
fPointer = fopen("retail.dat", "r");
fscanf(fPointer, "%d", &temp);
printf("ID number of first item: %d\n", temp);
//Traverses through each transaction in order to find the max value.
while(!feof(fPointer)){
fscanf(fPointer, "%d", &temp);
if(max < temp){
max = temp;
}
}
fclose(fPointer);
printf("Largest ID number found: %d\n", max);
printf("\nSH: initializing transaction array\n");
//Creation of table
char *cTable = (char*)malloc(sizeof(char) * (max + 1) * size); //Allocates an array of characters for each transaction
for(i=0; i < (max+1)*size; i++) {
// memset(cTable[i], '\0', sizeof(char) * (max + 1) * size); //Initialize all values to 0.
cTable[i] = '\0';
}
printf("SH: initialization of transaction array COMPLETE\n");
printf("\nSH: populating transaction array\n");
char line[400];
char *cNum;
fPointer = fopen("retail.dat", "r");
for(i = 0; i < size; i++){
fgets(line, 400, fPointer);
cNum = strtok(line, " \n");
while(cNum != NULL){
num = atoi(cNum);
cTable[i * (max + 1) + num] = '1';
cNum = strtok(NULL, " \n");
}
}
printf("SH: populating transaction array COMPLETE\n");
//Creating copy of transaction table in the video card memory
char* gpuT;
cudaMalloc(&gpuT, size * (max + 1) * sizeof(char));
cudaMemcpy(gpuT, cTable, (size * (max + 1) * sizeof(char)), cudaMemcpyHostToDevice);
printf("\nSH: initializing cardinality '1' sets\n");
//Creates a frequency table of item sets with a Cardinality of 1; where the array index represents the item
//number. All the items have their counts initially set to zero
int * fTable = (int *)malloc((max + 1) * (cardinality + 1) * sizeof(int));
for(i = 0; i < max + 1; i++){
fTable[i * (cardinality + 1)] = i;
fTable[(i * (cardinality + 1)) + cardinality] = 0;
}
int* gpuF;
cudaMalloc(&gpuF, (max + 1) * (cardinality + 1) * sizeof(int));
cudaMemcpy(gpuF, fTable, (max + 1) * (cardinality + 1) * sizeof(int), cudaMemcpyHostToDevice);
printf("SH: initialization of cardinality '1' sets COMPLETE\n");
printf("\nSH: counting of cardinality '1' sets\n");
//setting the number of cores to be used by the gpu
numBlocks = (max + 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuF, gpuT, size, (max + 1), (max + 1), cardinality);
printf("SH: counting of cardinality '1' sets COMPLETE\n");
printf("\nSH: removing item sets whose counts are below the support threshold\n");
//setting the number of cores to be used by the gpu
numBlocks = (max + 1) * (cardinality + 1)/ THREADS_PER_BLOCK + 1;
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
validSets<<< numBlocks, THREADS_PER_BLOCK>>>(gpuF, cardinality, max + 1, mSupport);
printf("SH: removal of item sets COMPLETE\n");
cudaMemcpy(fTable, gpuF, ((max + 1) * (cardinality + 1) * sizeof(int)), cudaMemcpyDeviceToHost);
//invalidating elements that are below the support count and counting the remaining eligible elements
count = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
count++;
}
}
printf("\nRemaining items sets: %d\n", count);
//creating new table consisting of only valid items
int iTable[count];
j = 0;
for(i = 0; i < (max + 1); i++){
if (fTable[i * (cardinality + 1) + cardinality] != 0){
iTable[j] = fTable[i * (cardinality + 1)];
j++;
}
}
//creating a table to hold the currently valid items and a variable for their count
int * vTable = iTable;
int lastCount = count;
while(count > 1){
cardinality++;
printf("\nSH: initializating new cardinality '%d' sets\n", cardinality);
//temporary array that will hold the new item sets
int temp[nCr(count, cardinality) * (cardinality + 1)];
printf("SH: initialization of new cardinality '%d' sets COMPLETE\n", cardinality);
printf("\nSH: initializating old cardinality '%d' sets\n", cardinality - 1);
//array of previous item sets
int oldSets[nCr(lastCount, cardinality - 1) * cardinality];
//array that holds one old item set for insertion into the table
int oldEntry[cardinality - 1];
printf("SH: initialization of old cardinality '%d' sets COMPLETE\n", cardinality - 1);
printf("\nSH: populating old cardinality '%d' sets\n", cardinality - 1);
//function populates old item set
k = 0;
if(cardinality - 1 <= lastCount){
for(i = 0; (oldEntry[i] = i) < cardinality - 2; i++);
for(i = 0; i < cardinality - 1; i++){
oldSets[(k * cardinality) + i] = vTable[oldEntry[i]];
}
k++;
for(;;){
for( i = cardinality - 2; i >= 0 && oldEntry[i] == (lastCount - (cardinality - 1) + i); i--);
if(i < 0){
break;
}
else{
oldEntry[i]++;
for(++i; i < cardinality - 1; i++){
oldEntry[i] = oldEntry[i - 1] + 1;
}
for(j = 0; j < cardinality - 1; j++){
oldSets[(k * cardinality) + j] = vTable[oldEntry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
oldSets[(i * cardinality) + cardinality - 1] = 0;
}
printf("SH: populating of old cardinality '%d' sets COMPLETE\n", cardinality - 1);
//array that will hold the information for a single item set before it is added to the
//array of all item sets
int entry[cardinality];
printf("\nSH: populating cardinality '%d' sets\n", cardinality);
//function populates new item set
k = 0;
if(cardinality <= count){
for(i = 0; (entry[i] = i) < cardinality - 1; i++);
for(i = 0; i < cardinality; i++){
temp[(k*(cardinality + 1)) + i] = vTable[entry[i]];
}
k++;
for(;;){
for( i = cardinality - 1; i >= 0 && entry[i] == (count - cardinality + i); i--);
if(i < 0){
break;
}
else{
entry[i]++;
for(++i; i < cardinality; i++){
entry[i] = entry[i - 1] + 1;
}
for(j = 0; j < cardinality; j++){
temp[(k*(cardinality + 1)) + j] = vTable[entry[j]];
}
k++;
}
}
}
for(i = 0; i < nCr(count, cardinality); i++){
temp[(i*(cardinality + 1)) + cardinality ] = 0;
}
printf("SH: populating of cardinality '%d' sets COMPLETE\n", cardinality);
printf("\nSH: counting cardinality '%d' sets\n", cardinality);
//counting the number of instances of the item sets among the transactions
char * gpuV;
int * gpuSet;
cudaMalloc(&gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality));
cudaMemcpy(gpuSet, temp, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), cudaMemcpyHostToDevice);
numBlocks = nCr(count, cardinality);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuSet, gpuT, size, max + 1, nCr(count, cardinality), cardinality);
cudaMemcpy(temp, gpuSet, sizeof(int) * (cardinality + 1) * nCr(count, cardinality), cudaMemcpyDeviceToHost);
cudaFree(gpuSet);
printf("SH: counting of cardinality '%d' sets COMPLETE\n\n", cardinality);
printf("\nSH: counting old cardinality '%d' sets\n", cardinality - 1);
//counting the number of instances of the item sets among the transactions
cudaMalloc(&gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1));
cudaMemcpy(gpuSet, oldSets, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), cudaMemcpyHostToDevice);
numBlocks = nCr(lastCount, cardinality - 1);
if(numBlocks > MAX_NUMBER_BLOCKS){
numBlocks = MAX_NUMBER_BLOCKS;
}
counting<<< numBlocks, THREADS_PER_BLOCK>>>(gpuSet, gpuT, size, max + 1, nCr(lastCount, cardinality - 1), cardinality - 1);
cudaMemcpy(oldSets, gpuSet, sizeof(int) * cardinality * nCr(lastCount, cardinality - 1), cudaMemcpyDeviceToHost);
cudaFree(gpuSet);
printf("SH: counting of old cardinality '%d' sets COMPLETE\n\n", cardinality - 1);
for(i = 0; i <= cardinality; i++){
if(i == cardinality){
printf("Count\n");
}
else{
printf("Item '%d'\t", (i+1));
}
}
for(i = 0; i < nCr(count, cardinality); i ++){
for(j = 0; j <= cardinality; j++){
printf("%d\t\t", temp[(i*(cardinality + 1))+j]);
}
printf("\n");
}
printf("\nSH: removing item sets whose counts are below the support threshold\n");
//invalidating elements that are below the support count and counting the remaining eligible elements
int tCount = count;
lastCount = tCount;
count = 0;
for(i = 0; i < nCr(tCount, cardinality); i++){
if (temp[(i*(cardinality + 1)) + cardinality] < mSupport){
temp[(i * (cardinality + 1)) + cardinality] = 0;
}
else{
count++;
}
}
printf("SH: removal of item sets COMPLETE\n");
printf("\nRemaining items sets: %d\n", count);
//set Table of valid items
char valid[max + 1];
for(i = 0; i <= max; i++){
valid[i] = '\0';
}
for(i = 0; i < nCr(tCount, cardinality); i++){
for(j = 0; j < cardinality; j++){
if(temp[(i * (cardinality + 1)) + cardinality] > 0){
valid[temp[(i * (cardinality + 1)) + j]] = '1';
}
}
}
//creating new table consisting of only valid items
int rTable[count];
count = 0;
j = 0;
for(i = 0; i <= max; i++){
if (valid[i] == '1'){
rTable[j] = i;
j++;
count++;
}
}
vTable = rTable;
if(count == 0){
printf("\n----Most Frequent Item Sets----\n\n");
for(i = 0; i < nCr(lastCount, cardinality - 1); i++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
printf("Set: {");
}
for(j = 0; j < cardinality; j++){
if(oldSets[(i * cardinality) + (cardinality-1)] > mSupport){
if(j == cardinality - 1){
printf("}\t\tCount: %d\n", oldSets[(i * cardinality) + j]);
}
else{
printf("'%d'", oldSets[(i * cardinality) + j]);
}
}
}
}
printf("\n");
}
}
}
//factorial function
long int factorial(int x){
int count = x;
while (count > 1){
x = x * (count - 1);
count--;
}
if(x == 0){
x = 1;
}
return x;
}
//combinatorics function
long int nCr(int n, int r){
int y;
int z;
int w = n - 1;
int init = n;
int x;
if(r > (n-r)){
y = r;
}
else{
y = (n-r);
}
z = n - y;
while(z > 1){
n = n * w;
w--;
z--;
}
if( r > (init - r)){
x = n/factorial(init - r);
}
else{
x = n/factorial(r);
}
return x;
}
|
86881b1a5c20c5a5b04b6b501fdadf2e027590c3.hip | // !!! This is a file automatically generated by hipify!!!
/*
Copyright (c) 2014, Evghenii Gaburov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CUDA_
#error "Something went wrong..."
#endif
void ispc_malloc(void **ptr, const size_t size)
{
hipMallocManaged(ptr, size);
}
void ispc_free(void *ptr)
{
hipFree(ptr);
}
void ispc_memset(void *ptr, int value, size_t size)
{
hipMemset(ptr, value, size);
}
void ispcSetMallocHeapLimit(size_t value)
{
hipDeviceSetLimit(hipLimitMallocHeapSize,value);
}
void ispcSetStackLimit(size_t value)
{
hipDeviceSetLimit(hipLimitStackSize,value);
}
unsigned long long ispcGetMallocHeapLimit()
{
size_t value;
hipDeviceGetLimit(&value, hipLimitMallocHeapSize);
return value;
}
unsigned long long ispcGetStackLimit()
{
size_t value;
hipDeviceGetLimit(&value, hipLimitStackSize);
return value;
}
void * ispcMemcpy(void *dest, void *src, size_t num)
{
hipMemcpy(dest, src, num, hipMemcpyDefault);
return dest;
}
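/* Minimal usage sketch for the wrappers above (illustrative only; the function
name, buffer size and the consuming kernel are hypothetical, not part of this
file): allocate unified memory, clear it on the device, hand it to ISPC/device
code, then release it. */
void ispc_malloc_example()
{
const size_t n = 1 << 20;
float *buf = 0;
ispc_malloc((void **)&buf, n * sizeof(float)); /* managed allocation, visible to host and device */
ispc_memset(buf, 0, n * sizeof(float)); /* device-side clear */
/* ... launch ISPC tasks or kernels that read and write buf ... */
ispc_free(buf);
}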
| 86881b1a5c20c5a5b04b6b501fdadf2e027590c3.cu | /*
Copyright (c) 2014, Evghenii Gaburov
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of Intel Corporation nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _CUDA_
#error "Something went wrong..."
#endif
void ispc_malloc(void **ptr, const size_t size)
{
cudaMallocManaged(ptr, size);
}
void ispc_free(void *ptr)
{
cudaFree(ptr);
}
void ispc_memset(void *ptr, int value, size_t size)
{
cudaMemset(ptr, value, size);
}
void ispcSetMallocHeapLimit(size_t value)
{
cudaDeviceSetLimit(cudaLimitMallocHeapSize,value);
}
void ispcSetStackLimit(size_t value)
{
cudaDeviceSetLimit(cudaLimitStackSize,value);
}
unsigned long long ispcGetMallocHeapLimit()
{
size_t value;
cudaDeviceGetLimit(&value, cudaLimitMallocHeapSize);
return value;
}
unsigned long long ispcGetStackLimit()
{
size_t value;
cudaDeviceGetLimit(&value, cudaLimitStackSize);
return value;
}
void * ispcMemcpy(void *dest, void *src, size_t num)
{
cudaMemcpy(dest, src, num, cudaMemcpyDefault);
return dest;
}
|
31a90d47b4ab9daeb4076c9d8e68a8a64fe1e225.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <iostream>
// Matrix transpose kernel - Consider 1 block to contain all the elements in 1 row of the matrix
// So 1024 is the maximum number of elements a row should contain
// IMPORTANT : Assuming grid is 1D and nx=ny
//
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void matrix_transpose_k1(float* input,float* output,const int nx, const int ny)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.x*blockDim.x;
//printf("gid : %d , offset : %d , index : %d ,value : %f \n", gid, offset, offset + blockIdx.x,input[offset + blockIdx.x]);
output[gid] = input[offset + blockIdx.x];
}
// Copy the input matrix elements to the output matrix with coalesced access and write them the same way
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void copy_rows(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[iy*ny + ix] = input[iy*nx + ix];
}
}
//// Copy the input matrix elements to the output matrix with strided access and write them the same way
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void copy_columns(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[ix*ny + iy];
}
}
// Read the elements from the input matrix in a coalesced manner and write to the output matrix in a strided manner
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[iy*nx + ix];
}
}
__global__ void read_stride_write_coaleased_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[iy*nx + ix] = input[ix*ny + iy];
}
}
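// A common further optimization, included here only as an illustrative sketch
// (it is not part of the original file): stage a 32x32 tile in shared memory so
// that both the global read and the global write are coalesced. The +1 padding
// avoids shared memory bank conflicts; launch with a 32x32 thread block and a
// grid of ((nx+31)/32, (ny+31)/32). Boundaries are handled by the guards.
#define TILE_DIM 32
__global__ void tiled_mat_trans(float* input, float* output, const int nx, const int ny)
{
__shared__ float tile[TILE_DIM][TILE_DIM + 1];
int ix = blockIdx.x * TILE_DIM + threadIdx.x;
int iy = blockIdx.y * TILE_DIM + threadIdx.y;
if (ix < nx && iy < ny)
{
tile[threadIdx.y][threadIdx.x] = input[iy * nx + ix]; // coalesced read
}
__syncthreads();
int ox = blockIdx.y * TILE_DIM + threadIdx.x; // swap the block coordinates
int oy = blockIdx.x * TILE_DIM + threadIdx.y;
if (ox < ny && oy < nx)
{
output[oy * ny + ox] = tile[threadIdx.x][threadIdx.y]; // coalesced write
}
}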
void run_matrix_transpose_k1()
{
int ny = 1<<15;
int nx = 1 << 15;
int blockx = 32; //thread-block dimensions; 32x32 = 1024 threads, the per-block hardware limit
int blocky = 32;
float * h_matrix, *h_output;
int mat_size = nx * ny;
size_t mat_byte_size = sizeof(float)*(size_t)mat_size; //size_t so the byte count does not overflow int
h_matrix = (float*)malloc(mat_byte_size);
h_output = (float*)malloc(mat_byte_size);
for (int i = 0; i < mat_size; i++)
{
h_matrix[i] = i;
}
printf("Printing input matrix \n");
//for (int i = 0; i < mat_size; i+=32)
//{
// if (i != 0 && i%nx == 0)
// {
// printf("\n");
// }
// printf(" %1.0f ", h_matrix[i]);
//}
//printf("\n");
dim3 grid((nx+blockx-1)/blockx, (ny + blocky - 1) / blocky); //grid of thread blocks covering the nx x ny matrix
dim3 blocks(blockx, blocky); //threads per block
float * d_matrix, *d_output;
hipMalloc((float**)&d_matrix,mat_byte_size);
hipMalloc((float**)&d_output, mat_byte_size);
hipMemcpy(d_matrix,h_matrix,mat_byte_size,hipMemcpyHostToDevice);
copy_rows << <grid,blocks >> > (d_matrix,d_output,nx,ny);
hipDeviceSynchronize();
hipMemcpy(h_output,d_output,mat_byte_size,hipMemcpyDeviceToHost);
printf("Printing output matrix \n");
/*
for (int i = 0; i < ny; i++)
{
if (i != 0 && i%ny == 0)
{
printf("\n");
}
printf(" %1.0f ",h_output[i]);
}
printf("\n");*/
}
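// Minimal launch sketch for the transpose kernels above (illustrative only; the
// host function name is hypothetical and not part of the original file): a 32x32
// thread block stays within the 1024-thread limit and the grid covers an nx x ny
// matrix; error checking is omitted for brevity.
void run_read_coalesced_write_stride(float* d_in, float* d_out, const int nx, const int ny)
{
dim3 block(32, 32);
dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
read_coaleased_write_stride_mat_trans << <grid, block >> > (d_in, d_out, nx, ny);
hipDeviceSynchronize();
}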
//int main()
//{
// run_matrix_transpose_k1();
// system("pause");
// return 0;
//}
| 31a90d47b4ab9daeb4076c9d8e68a8a64fe1e225.cu | #include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <cuda.h>
#include <stdio.h>
#include <iostream>
// Matrix transpose kernel - Consider 1 block to contain all the elements in 1 row of the matrix
// So 1024 is the maximum number of elements a row should contain
// IMPORTANT : Assuming grid is 1D and nx=ny
//
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void matrix_transpose_k1(float* input,float* output,const int nx, const int ny)
{
int gid = blockDim.x * blockIdx.x + threadIdx.x;
int offset = threadIdx.x*blockDim.x;
//printf("gid : %d , offset : %d , index : %d ,value : %f \n", gid, offset, offset + blockIdx.x,input[offset + blockIdx.x]);
output[gid] = input[offset + blockIdx.x];
}
// Copy the input matrix elements to the output matrix with coalesced access and write them the same way
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void copy_rows(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[iy*ny + ix] = input[iy*nx + ix];
}
}
//// Copy the input matrix elements to the output matrix with strided access and write them the same way
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void copy_columns(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[ix*ny + iy];
}
}
// Read the elements from the input matrix in a coalesced manner and write to the output matrix in a strided manner
// param 1 (input) - input matrix in 1D array
// param 2 (output) - transpose of the input matrix in 1D array
// param 3 (nx) - number of elements in input matrix's row
// param 4 (ny) - number of elements in input matrix's column
__global__ void read_coaleased_write_stride_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[ix*ny + iy] = input[iy*nx + ix];
}
}
__global__ void read_stride_write_coaleased_mat_trans(float* input, float* output, const int nx, const int ny)
{
int ix = blockIdx.x * blockDim.x + threadIdx.x;
int iy = blockIdx.y * blockDim.y + threadIdx.y;
if (ix < nx && iy < ny)
{
output[iy*nx + ix] = input[ix*ny + iy];
}
}
void run_matrix_transpose_k1()
{
int ny = 1<<15;
int nx = 1 << 15;
int blockx = 32; //thread-block dimensions; 32x32 = 1024 threads, the per-block hardware limit
int blocky = 32;
float * h_matrix, *h_output;
int mat_size = nx * ny;
size_t mat_byte_size = sizeof(float)*(size_t)mat_size; //size_t so the byte count does not overflow int
h_matrix = (float*)malloc(mat_byte_size);
h_output = (float*)malloc(mat_byte_size);
for (int i = 0; i < mat_size; i++)
{
h_matrix[i] = i;
}
printf("Printing input matrix \n");
//for (int i = 0; i < mat_size; i+=32)
//{
// if (i != 0 && i%nx == 0)
// {
// printf("\n");
// }
// printf(" %1.0f ", h_matrix[i]);
//}
//printf("\n");
dim3 grid((nx+blockx-1)/blockx, (ny + blocky - 1) / blocky); //grid of thread blocks covering the nx x ny matrix
dim3 blocks(blockx, blocky); //threads per block
float * d_matrix, *d_output;
cudaMalloc((float**)&d_matrix,mat_byte_size);
cudaMalloc((float**)&d_output, mat_byte_size);
cudaMemcpy(d_matrix,h_matrix,mat_byte_size,cudaMemcpyHostToDevice);
copy_rows << <grid,blocks >> > (d_matrix,d_output,nx,ny);
cudaDeviceSynchronize();
cudaMemcpy(h_output,d_output,mat_byte_size,cudaMemcpyDeviceToHost);
printf("Printing output matrix \n");
/*
for (int i = 0; i < ny; i++)
{
if (i != 0 && i%ny == 0)
{
printf("\n");
}
printf(" %1.0f ",h_output[i]);
}
printf("\n");*/
}
//int main()
//{
// run_matrix_transpose_k1();
// system("pause");
// return 0;
//}
|
12cd0b09ef9a4ac3507ce7c525de35b326650258.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_right [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_right_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_right_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(-2,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(-2,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[0][0] * dims_update_halo_kernel2_yvel_plus_2_right[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[1][0] * dims_update_halo_kernel2_yvel_plus_2_right[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_right[0][0], dims_update_halo_kernel2_yvel_plus_2_right[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_right[1][0], dims_update_halo_kernel2_yvel_plus_2_right[1][1], arg1);
update_halo_kernel2_yvel_plus_2_right_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,43)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(43,"update_halo_kernel2_yvel_plus_2_right");
OPS_kernels[43].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_right_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_right_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_right_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_right_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_right_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_right_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_right_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_right_h[1][1] = ydim1;
cutilSafeCall(hipMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_right, dims_update_halo_kernel2_yvel_plus_2_right_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_right)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
hipLaunchKernelGGL(( ops_update_halo_kernel2_yvel_plus_2_right), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[43].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 43;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 43;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(43,"update_halo_kernel2_yvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
| 12cd0b09ef9a4ac3507ce7c525de35b326650258.cu | //
// auto-generated by ops.py
//
__constant__ int dims_update_halo_kernel2_yvel_plus_2_right [3][2];
static int dims_update_halo_kernel2_yvel_plus_2_right_h [3][2] = {0};
//user function
__device__
inline void update_halo_kernel2_yvel_plus_2_right_gpu(ACC<double> &yvel0,
ACC<double> &yvel1,
const int* fields)
{
if(fields[FIELD_YVEL0] == 1) yvel0(0,0,0) = yvel0(-2,0,0);
if(fields[FIELD_YVEL1] == 1) yvel1(0,0,0) = yvel1(-2,0,0);
}
__global__ void ops_update_halo_kernel2_yvel_plus_2_right(
double* __restrict arg0,
double* __restrict arg1,
const int* __restrict arg2,
int size0,
int size1,
int size2 ){
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
arg0 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[0][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[0][0] * dims_update_halo_kernel2_yvel_plus_2_right[0][1];
arg1 += idx_x * 1*1 + idx_y * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[1][0] + idx_z * 1*1 * dims_update_halo_kernel2_yvel_plus_2_right[1][0] * dims_update_halo_kernel2_yvel_plus_2_right[1][1];
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
ACC<double> argp0(dims_update_halo_kernel2_yvel_plus_2_right[0][0], dims_update_halo_kernel2_yvel_plus_2_right[0][1], arg0);
ACC<double> argp1(dims_update_halo_kernel2_yvel_plus_2_right[1][0], dims_update_halo_kernel2_yvel_plus_2_right[1][1], arg1);
update_halo_kernel2_yvel_plus_2_right_gpu(argp0, argp1, arg2);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
#else
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[3] = { arg0, arg1, arg2};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,3,range,43)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(43,"update_halo_kernel2_yvel_plus_2_right");
OPS_kernels[43].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[3];
int end[3];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[3];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 3,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<3; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
if (xdim0 != dims_update_halo_kernel2_yvel_plus_2_right_h[0][0] || ydim0 != dims_update_halo_kernel2_yvel_plus_2_right_h[0][1] || xdim1 != dims_update_halo_kernel2_yvel_plus_2_right_h[1][0] || ydim1 != dims_update_halo_kernel2_yvel_plus_2_right_h[1][1]) {
dims_update_halo_kernel2_yvel_plus_2_right_h[0][0] = xdim0;
dims_update_halo_kernel2_yvel_plus_2_right_h[0][1] = ydim0;
dims_update_halo_kernel2_yvel_plus_2_right_h[1][0] = xdim1;
dims_update_halo_kernel2_yvel_plus_2_right_h[1][1] = ydim1;
cutilSafeCall(cudaMemcpyToSymbol( dims_update_halo_kernel2_yvel_plus_2_right, dims_update_halo_kernel2_yvel_plus_2_right_h, sizeof(dims_update_halo_kernel2_yvel_plus_2_right)));
}
int *arg2h = (int *)arg2.data;
int x_size = MAX(0,end[0]-start[0]);
int y_size = MAX(0,end[1]-start[1]);
int z_size = MAX(0,end[2]-start[2]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1);
dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z);
int consts_bytes = 0;
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
reallocConstArrays(consts_bytes);
consts_bytes = 0;
arg2.data = OPS_consts_h + consts_bytes;
arg2.data_d = OPS_consts_d + consts_bytes;
for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d];
consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int));
mvConstArraysToDevice(consts_bytes);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
char *p_a[3];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
(start[1] * args[0].stencil->stride[1]);
base0 = base0+ dat0 *
args[0].dat->size[0] *
args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
(start[1] * args[1].stencil->stride[1]);
base1 = base1+ dat1 *
args[1].dat->size[0] *
args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 3);
ops_halo_exchanges(args,3,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0 && y_size > 0 && z_size > 0)
ops_update_halo_kernel2_yvel_plus_2_right<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(int *)arg2.data_d,x_size, y_size, z_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[43].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 3);
ops_set_halo_dirtybit3(&args[0],range);
ops_set_halo_dirtybit3(&args[1],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[43].mpi_time += t2-t1;
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[43].transfer += ops_compute_transfer(dim, start, end, &arg1);
}
}
#ifdef OPS_LAZY
void ops_par_loop_update_halo_kernel2_yvel_plus_2_right(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 43;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 43;
for ( int i=0; i<6; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 3;
desc->args = (ops_arg*)malloc(3*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
char *tmp = (char*)malloc(NUM_FIELDS*sizeof(int));
memcpy(tmp, arg2.data,NUM_FIELDS*sizeof(int));
desc->args[2].data = tmp;
desc->function = ops_par_loop_update_halo_kernel2_yvel_plus_2_right_execute;
if (OPS_diags > 1) {
ops_timing_realloc(43,"update_halo_kernel2_yvel_plus_2_right");
}
ops_enqueue_kernel(desc);
}
#endif
|
38918ce4cbdc20d4f1c908e012781da4100bdf34.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "gpu_maskPointCloud.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *verts = NULL;
hipMalloc(&verts, XSIZE*YSIZE);
const int width = 1;
const int height = 1;
const int *mask = NULL;
hipMalloc(&mask, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);hipLaunchKernelGGL((
gpu_maskPointCloud), dim3(gridBlock),dim3(threadBlock), 0, 0, verts,width,height,mask);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL((
gpu_maskPointCloud), dim3(gridBlock),dim3(threadBlock), 0, 0, verts,width,height,mask);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL((
gpu_maskPointCloud), dim3(gridBlock),dim3(threadBlock), 0, 0, verts,width,height,mask);
}
hipDeviceSynchronize(); //wait for the timed launches to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | 38918ce4cbdc20d4f1c908e012781da4100bdf34.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "gpu_maskPointCloud.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float4 *verts = NULL;
cudaMalloc(&verts, XSIZE*YSIZE);
const int width = 1;
const int height = 1;
const int *mask = NULL;
cudaMalloc(&mask, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
gpu_maskPointCloud<<<gridBlock,threadBlock>>>(verts,width,height,mask);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
gpu_maskPointCloud<<<gridBlock,threadBlock>>>(verts,width,height,mask);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
gpu_maskPointCloud<<<gridBlock,threadBlock>>>(verts,width,height,mask);
}
cudaDeviceSynchronize(); //wait for the timed launches to finish before reading the clock
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
5c1f16e82254104eacbe046a649e26359f5a842d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
//PCG
namespace caffe {
void fft2_third(hipfftHandle forward_plan, float* d_in, float2* d_freq)
{
hipfftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2_third(hipfftHandle inverse_plan, float2* d_freq, float* d_out)
{
hipfftExecC2R(inverse_plan, d_freq, d_out);
}
template <typename Dtype>
__global__ void ifftshift_third(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift_third(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y;//
//output_real[index]=ori_index;
//output_imag[index]=-ori_index;//
}
}
}
template <typename Dtype>
__global__ void set_zeros(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag) {
CUDA_KERNEL_LOOP(index, n) {
rhs_samplef_real[index]=0;
rhs_samplef_imag[index]=0;
}
}
template <typename Dtype>
__global__ void weight_samples(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag, Dtype* samplesf_real, Dtype* samplesf_imag, Dtype* sample_weight, int sample_num,
int num_per_sample_imag) {
CUDA_KERNEL_LOOP(index, n) {
int index1;
for(int sample_id=0; sample_id<sample_num; sample_id++)
{
index1=num_per_sample_imag*sample_id+index;
rhs_samplef_real[index]=rhs_samplef_real[index]+sample_weight[sample_id]*samplesf_real[index1];
rhs_samplef_imag[index]=rhs_samplef_imag[index]+sample_weight[sample_id]*samplesf_imag[index1];
}
}
}
template <typename Dtype>
__global__ void comput_xf_yf(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag, Dtype* yf_real, Dtype* yf_imag, int num_per_channel_imag) {
CUDA_KERNEL_LOOP(index, n) {
int index1=index%num_per_channel_imag;
rhs_samplef_real[index]=rhs_samplef_real[index]*yf_real[index1]-rhs_samplef_imag[index]*yf_imag[index1];
rhs_samplef_imag[index]=rhs_samplef_real[index]*yf_imag[index1]+rhs_samplef_imag[index]*yf_real[index1];
}
}
template <typename Dtype>
__global__ void add_mask_third(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
void WtfthirdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* data=Layer<Dtype>::feature_num[0]->mutable_cpu_data();
int feature_num=data[0];
Dtype* resolution_data=Layer<Dtype>::resolution_index[0]->mutable_cpu_data();
int resolution_index=resolution_data[0];
Dtype* sample_weight=Layer<Dtype>::sample_weight[0]->mutable_gpu_data();
Dtype* sample_weight_cpu=Layer<Dtype>::sample_weight[0]->mutable_cpu_data();
Dtype* rhs_samplef_real; Dtype* rhs_samplef_imag; Dtype* samplesf_real; Dtype* samplesf_imag;
int col_num, row_num, channel_num, sample_num, num_per_sample_imag, num_per_channel_imag, num_per_channel_real;
sample_num=Layer<Dtype>::first_layer_samplef_real[0]->num();
Dtype* fftshift_mask; Dtype* ifftshift_mask;
Dtype* binary_mask;
int count1, count2;
printf("we get here1 %d\n",feature_num);
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
rhs_samplef_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
rhs_samplef_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
count1=Layer<Dtype>::first_layer_hf_real[blob_id]->count();
hipLaunchKernelGGL(( set_zeros), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, rhs_samplef_real, rhs_samplef_imag);
samplesf_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
samplesf_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_samplef_real[blob_id]->width();
row_num=Layer<Dtype>::first_layer_samplef_real[blob_id]->height();
num_per_sample_imag=col_num*row_num*Layer<Dtype>::first_layer_hf_real[blob_id]->channels();
hipLaunchKernelGGL(( weight_samples), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, rhs_samplef_real, rhs_samplef_imag, samplesf_real, samplesf_imag, sample_weight, sample_num, num_per_sample_imag);
Dtype* yf_real=Layer<Dtype>::first_layer_yf_real[0]->mutable_gpu_data();
Dtype* yf_imag=Layer<Dtype>::first_layer_yf_imag[0]->mutable_gpu_data();
num_per_channel_imag=row_num*col_num;
hipLaunchKernelGGL(( comput_xf_yf), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, rhs_samplef_real, rhs_samplef_imag, yf_real, yf_imag, num_per_channel_imag);
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
hipLaunchKernelGGL(( ifftshift_third), dim3(CAFFE_GET_BLOCKS(count1)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count1, num_per_channel_imag, ifftshift_mask, Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel_imag);
ifft2_third(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
binary_mask=Layer<Dtype>::binary_mask[0]->mutable_gpu_data();
count2=row_num*row_num*Layer<Dtype>::first_layer_hf_real[blob_id]->channels();
num_per_channel_real=row_num*row_num;
hipLaunchKernelGGL(( add_mask_third), dim3(CAFFE_GET_BLOCKS(count2)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count2, num_per_channel_real, binary_mask, this->d_in2, this->d_in_tmp2);
if(blob_id==0)
{
printf("we get here\n");
top[0]->Reshape(1,Layer<Dtype>::first_layer_hf_real[blob_id]->channels(),Layer<Dtype>::first_layer_hf_real[blob_id]->height(),Layer<Dtype>::first_layer_hf_real[blob_id]->width());
caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
}
}
else
{
}
}
Dtype* clear_memory_cpu=Layer<Dtype>::clear_memory[0]->mutable_cpu_data();
if(clear_memory_cpu[0]>0.5) // free the allocated memory
{
hipFree(this->d_in1); hipFree(this->d_in2); hipFree(this->d_in3); hipFree(this->d_in4);
hipFree(this->d_in_tmp1); hipFree(this->d_in_tmp2); hipFree(this->d_in_tmp3); hipFree(this->d_in_tmp4);
hipFree(this->d_freq1); hipFree(this->d_freq2); hipFree(this->d_freq3); hipFree(this->d_freq4);
hipfftDestroy(this->forward_plan[0]); hipfftDestroy(this->forward_plan[1]); hipfftDestroy(this->forward_plan[2]); hipfftDestroy(this->forward_plan[3]);
if(feature_num==5)
{
hipFree(this->d_in5);
hipFree(this->d_in_tmp5);
hipfftDestroy(this->forward_plan[4]);
hipfftDestroy(this->inverse_plan[4]);
}
}
}
template <typename Dtype>
void WtfthirdLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "start of convolutionlayer backward_gpu";
//CHECK((this->kstride_h_ == 1) && (this->kstride_w_ == 1)) << "Backward_gpu is not implemented for fully convolution."
//LOG(INFO) << "end of convolutionlayer backward_gpu";
}
INSTANTIATE_LAYER_GPU_FUNCS(WtfthirdLayer);
} // namespace caffe
| 5c1f16e82254104eacbe046a649e26359f5a842d.cu | #include <vector>
#include "caffe/filler.hpp"
#include "caffe/layer.hpp"
#include "caffe/util/im2col.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
// This function implements the PCG functionality
namespace caffe {
void fft2_third(cufftHandle forward_plan, float* d_in, float2* d_freq)
{
cufftExecR2C(forward_plan, d_in, d_freq);
}
void ifft2_third(cufftHandle inverse_plan, float2* d_freq, float* d_out)
{
cufftExecC2R(inverse_plan, d_freq, d_out);
}
template <typename Dtype>
__global__ void ifftshift_third(const int n, int num_per_channel, Dtype* L_mask, Dtype* input_real, Dtype* input_imag, float2* output, int row_num, int col_num,int num_per_channel1) {
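  // Inverse fftshift via the precomputed 1-based index map L_mask: a positive entry indexes the
  // source element directly; a negative entry indexes |entry| and stores its complex conjugate
  // into the packed float2 output.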
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>0)
{int ori_index=L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=input_imag[ori_index];
}
else
{ int ori_index=-L_mask[current_index]-1+channel_index*num_per_channel1;
output[index].x=input_real[ori_index];
output[index].y=-input_imag[ori_index];
}
}
}
template <typename Dtype>
__global__ void fftshift_third(const int n, int num_per_channel1, Dtype* L_mask, float2* input, Dtype* output_real, Dtype* output_imag) {
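  // fftshift via the precomputed per-position index map L_mask: a non-negative entry gives the
  // source offset within the channel directly; a negative entry selects the element at |entry|
  // and stores its complex conjugate.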
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel1;
int current_index=index%num_per_channel1;
if(L_mask[current_index]>-0.5)
{
int ori_index=L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=input[ori_index].y;
// output_real[index]=ori_index;
// output_imag[index]=ori_index;
}
else
{
int ori_index=-L_mask[current_index]+channel_index*num_per_channel1;
output_real[index]=input[ori_index].x;
output_imag[index]=-input[ori_index].y;// take the complex conjugate
//output_real[index]=ori_index;
//output_imag[index]=-ori_index;// take the complex conjugate
}
}
}
template <typename Dtype>
__global__ void set_zeros(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag) {
CUDA_KERNEL_LOOP(index, n) {
rhs_samplef_real[index]=0;
rhs_samplef_imag[index]=0;
}
}
template <typename Dtype>
__global__ void weight_samples(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag, Dtype* samplesf_real, Dtype* samplesf_imag, Dtype* sample_weight, int sample_num,
int num_per_sample_imag) {
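  // Accumulate the sample-weighted sum over all stored samples: for each output element,
  // add sample_weight[s] * samplesf[s] across the sample dimension.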
CUDA_KERNEL_LOOP(index, n) {
int index1;
for(int sample_id=0; sample_id<sample_num; sample_id++)
{
index1=num_per_sample_imag*sample_id+index;
rhs_samplef_real[index]=rhs_samplef_real[index]+sample_weight[sample_id]*samplesf_real[index1];
rhs_samplef_imag[index]=rhs_samplef_imag[index]+sample_weight[sample_id]*samplesf_imag[index1];
}
}
}
template <typename Dtype>
__global__ void comput_xf_yf(const int n, Dtype* rhs_samplef_real, Dtype* rhs_samplef_imag, Dtype* yf_real, Dtype* yf_imag, int num_per_channel_imag) {
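  // In-place elementwise complex multiplication of rhs_samplef by yf, with yf broadcast
  // across channels (index1 is the within-channel position).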
CUDA_KERNEL_LOOP(index, n) {
int index1=index%num_per_channel_imag;
// Use temporaries so the imaginary part is computed from the original real part (in-place complex multiply).
Dtype real_tmp=rhs_samplef_real[index];
Dtype imag_tmp=rhs_samplef_imag[index];
rhs_samplef_real[index]=real_tmp*yf_real[index1]-imag_tmp*yf_imag[index1];
rhs_samplef_imag[index]=real_tmp*yf_imag[index1]+imag_tmp*yf_real[index1];
}
}
template <typename Dtype>
__global__ void add_mask_third(const int n, int num_per_channel, Dtype* mask, float* input, float * output) {
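  // Multiply each element by the per-position mask value, broadcast across channels.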
CUDA_KERNEL_LOOP(index, n) {
int channel_index=index/num_per_channel;
int current_index=index%num_per_channel;
output[index]=input[index]*mask[current_index];
}
}
template <typename Dtype>
void WtfthirdLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* data=Layer<Dtype>::feature_num[0]->mutable_cpu_data();
int feature_num=data[0];
Dtype* resolution_data=Layer<Dtype>::resolution_index[0]->mutable_cpu_data();
int resolution_index=resolution_data[0];
Dtype* sample_weight=Layer<Dtype>::sample_weight[0]->mutable_gpu_data();
Dtype* sample_weight_cpu=Layer<Dtype>::sample_weight[0]->mutable_cpu_data();
Dtype* rhs_samplef_real; Dtype* rhs_samplef_imag; Dtype* samplesf_real; Dtype* samplesf_imag;
int col_num, row_num, channel_num, sample_num, num_per_sample_imag, num_per_channel_imag, num_per_channel_real;
sample_num=Layer<Dtype>::first_layer_samplef_real[0]->num();
Dtype* fftshift_mask; Dtype* ifftshift_mask;
Dtype* binary_mask;
int count1, count2;
printf("we get here1 %d\n",feature_num);
for(int blob_id=0; blob_id<feature_num;blob_id++)
{
if(blob_id!=2)
{
rhs_samplef_real=Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data();
rhs_samplef_imag=Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data();
count1=Layer<Dtype>::first_layer_hf_real[blob_id]->count();
set_zeros<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, rhs_samplef_real, rhs_samplef_imag);
samplesf_real=Layer<Dtype>::first_layer_samplef_real[blob_id]->mutable_gpu_data();
samplesf_imag=Layer<Dtype>::first_layer_samplef_imag[blob_id]->mutable_gpu_data();
col_num=Layer<Dtype>::first_layer_samplef_real[blob_id]->width();
row_num=Layer<Dtype>::first_layer_samplef_real[blob_id]->height();
num_per_sample_imag=col_num*row_num*Layer<Dtype>::first_layer_hf_real[blob_id]->channels();
weight_samples<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, rhs_samplef_real, rhs_samplef_imag, samplesf_real, samplesf_imag, sample_weight, sample_num, num_per_sample_imag);
Dtype* yf_real=Layer<Dtype>::first_layer_yf_real[0]->mutable_gpu_data();
Dtype* yf_imag=Layer<Dtype>::first_layer_yf_imag[0]->mutable_gpu_data();
num_per_channel_imag=row_num*col_num;
comput_xf_yf<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, rhs_samplef_real, rhs_samplef_imag, yf_real, yf_imag, num_per_channel_imag);
ifftshift_mask=Layer<Dtype>::ifftshift_mask[0]->mutable_gpu_data();
ifftshift_third<<<CAFFE_GET_BLOCKS(count1), CAFFE_CUDA_NUM_THREADS>>>(count1, num_per_channel_imag, ifftshift_mask, Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data() , Layer<Dtype>::first_layer_hf_imag[blob_id]->mutable_gpu_data(), this->d_freq2,row_num, col_num,num_per_channel_imag);
ifft2_third(this->inverse_plan[blob_id],this->d_freq2,this->d_in2);
binary_mask=Layer<Dtype>::binary_mask[0]->mutable_gpu_data();
count2=row_num*row_num*Layer<Dtype>::first_layer_hf_real[blob_id]->channels();
num_per_channel_real=row_num*row_num;
add_mask_third<<<CAFFE_GET_BLOCKS(count2), CAFFE_CUDA_NUM_THREADS>>>(count2, num_per_channel_real, binary_mask, this->d_in2, this->d_in_tmp2);
if(blob_id==0)
{
printf("we get here\n");
top[0]->Reshape(1,Layer<Dtype>::first_layer_hf_real[blob_id]->channels(),Layer<Dtype>::first_layer_hf_real[blob_id]->height(),Layer<Dtype>::first_layer_hf_real[blob_id]->width());
caffe_copy(top[0]->count(),Layer<Dtype>::first_layer_hf_real[blob_id]->mutable_gpu_data(),top[0]->mutable_gpu_data());
}
}
else
{
}
}
Dtype* clear_memory_cpu=Layer<Dtype>::clear_memory[0]->mutable_cpu_data();
if(clear_memory_cpu[0]>0.5) // free the allocated memory
{
cudaFree(this->d_in1); cudaFree(this->d_in2); cudaFree(this->d_in3); cudaFree(this->d_in4);
cudaFree(this->d_in_tmp1); cudaFree(this->d_in_tmp2); cudaFree(this->d_in_tmp3); cudaFree(this->d_in_tmp4);
cudaFree(this->d_freq1); cudaFree(this->d_freq2); cudaFree(this->d_freq3); cudaFree(this->d_freq4);
cufftDestroy(this->forward_plan[0]); cufftDestroy(this->forward_plan[1]); cufftDestroy(this->forward_plan[2]); cufftDestroy(this->forward_plan[3]);
if(feature_num==5)
{
cudaFree(this->d_in5);
cudaFree(this->d_in_tmp5);
cufftDestroy(this->forward_plan[4]);
cufftDestroy(this->inverse_plan[4]);
}
}
}
template <typename Dtype>
void WtfthirdLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
//LOG(INFO) << "start of convolutionlayer backward_gpu";
//CHECK((this->kstride_h_ == 1) && (this->kstride_w_ == 1)) << "Backward_gpu is not implemented for fully convolution."
//LOG(INFO) << "end of convolutionlayer backward_gpu";
}
INSTANTIATE_LAYER_GPU_FUNCS(WtfthirdLayer);
} // namespace caffe
|
849dc7263f76239f713e7307529df1ca7f82ade9.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
#define SIZE 1024
__global__ void vectorAdd(int *a, int *b, int *c, int n)
{
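    // One thread per element; only threadIdx.x is used, so the kernel assumes a single
    // block launched with at least n threads.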
int i = threadIdx.x;
if(i<n)
c[i]=a[i]+b[i];
} | 849dc7263f76239f713e7307529df1ca7f82ade9.cu | #include "includes.h"
#define SIZE 1024
__global__ void vectorAdd(int *a, int *b, int *c, int n)
{
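    // One thread per element; only threadIdx.x is used, so the kernel assumes a single
    // block launched with at least n threads.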
int i = threadIdx.x;
if(i<n)
c[i]=a[i]+b[i];
} |
9659b03c9ae9cb9230a25cab56ce33a6b37e48cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <despot/GPUcore/thread_globals.h>
#include <despot/core/globals.h>
#include <map>
#include <ucontext.h>
#include <despot/solver/despot.h>
using namespace std;
using namespace despot;
__device__ __managed__ bool GPUDoPrint=false;
bool CPUDoPrint = false;
int CPUPrintPID = 0;
namespace despot {
namespace Globals {
ThreadParams ThreadParams::PARAMS(0);
ThreadStatistics ThreadStatistics::STATISTICS(0);
StreamManager StreamManager::MANAGER(0);
// Global mutex for the shared HyP-DESPOT tree
mutex global_mutex;
//Thread ID mapping
map<std::thread::id, int > ThreadIdMap;
bool force_print = false;
#ifdef WIN32
#include <windows.h>
#elif _POSIX_C_SOURCE >= 199309L
#include <time.h> // for nanosleep
#else
#include <unistd.h> // for usleep
#endif
void sleep_ms(int milliseconds) // cross-platform sleep function
{
#ifdef WIN32
Sleep(milliseconds);
#elif _POSIX_C_SOURCE >= 199309L
struct timespec ts;
ts.tv_sec = milliseconds / 1000;
ts.tv_nsec = (milliseconds % 1000) * 1000000;
nanosleep(&ts, NULL);
#else
usleep(milliseconds * 1000);
#endif
}
// Parameters for serialized printing in HyP-DESPOT. Typically used for debugging purposes.
ThreadParams::ThreadParams(int dummy){
PrintThreadID=0;
PrintParentEdge=18;
PrintEdge=2;
PrintDepth=1;
PrintAction=244;
}
// Multi-threading statistics
ThreadStatistics::ThreadStatistics(int dummy){
Active_thread_count = 0;
Serial_expansion_time = 0;
}
// CUDA concurrent kernel streams
StreamManager::StreamManager(int dummy){
stream_counter = 0;
cuda_streams = NULL;
Concurrency_threashold=INT_MAX;
}
void ChooseGPUForThread(){
if (Globals::config.useGPU){
int devicesCount;
hipGetDeviceCount(&devicesCount);
int deviceIndex = Globals::config.GPUid;
hipSetDevice(deviceIndex);
}
}
void RecordStartTime(){
ThreadStatistics::STATISTICS.start_time = Time::now();
}
void RecordSearchStartTime(){
ThreadStatistics::STATISTICS.search_start_time = Time::now();
}
// Return high-definition time in seconds
double ElapsedTime(){
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ThreadStatistics::STATISTICS.start_time);
return thread_d.count() / 1000000000.0f;
}
double ElapsedSearchTime() {
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ThreadStatistics::STATISTICS.search_start_time);
return thread_d.count() / 1000000000.0f;
}
double ElapsedTime(std::chrono::time_point<std::chrono::system_clock> ts){
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ts);
double passed_time=thread_d.count() / 1000000000.0f;
return passed_time;
}
void AddExpanded()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Expansion_Count++;
}
void ResetExpanded()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Expansion_Count=0;
}
int CountExpanded()
{
lock_guard<mutex> lck(global_mutex);
return ThreadStatistics::STATISTICS.Expansion_Count;
}
double SpeedUp(double parallel_time){
return ThreadStatistics::STATISTICS.Serial_expansion_time / parallel_time;
}
double Efficiency(double parallel_time){
double speedup = ThreadStatistics::STATISTICS.Serial_expansion_time / parallel_time;
return speedup / Globals::config.NUM_THREADS;
}
void AddSerialTime(float used_time)
{
ThreadStatistics::STATISTICS.Serial_expansion_time += used_time;
}
void ResetSerialTime()
{
ThreadStatistics::STATISTICS.Serial_expansion_time = 0;
}
void AddMappedThread(std::thread::id the_id, int mapped_id)
{
ThreadIdMap[the_id]=mapped_id;
}
int MapThread(std::thread::id the_id)
{
return ThreadIdMap[the_id];
}
void AddActiveThread()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Active_thread_count++;
}
void MinusActiveThread()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Active_thread_count--;
}
bool Timeout(float timeout)
{
auto t1 = Time::now();
fsec fs = t1 - ThreadStatistics::STATISTICS.search_start_time;
ns d = std::chrono::duration_cast<ns>(fs);
//if(d.count()/1000000000.0>=timeout)
// cout << "Time out for search " << d.count()/1000000000.0 << " s." << endl;
return d.count()/1000000000.0>=timeout;
}
void SetupCUDAStreams(){
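  // Create one CUDA stream per search thread (when multi-threading is enabled) so kernels
  // issued by different threads can overlap.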
if (Globals::config.use_multi_thread_) {
cout << "setting up CUDA streams for " << Globals::config.NUM_THREADS << " threads" << endl;
StreamManager::MANAGER.cuda_streams = new hipStream_t[Globals::config.NUM_THREADS];
for (int i = 0; i < Globals::config.NUM_THREADS; i++){
HANDLE_ERROR(hipStreamCreate(&StreamManager::MANAGER.cuda_streams[i]));
}
} else {
StreamManager::MANAGER.cuda_streams = NULL;
}
}
hipStream_t& GetThreadCUDAStream(int ThreadID){
return StreamManager::MANAGER.cuda_streams[ThreadID];
}
void DestroyCUDAStreams(){
if (StreamManager::MANAGER.cuda_streams) {
for (int i = 0; i < Globals::config.NUM_THREADS; i++)
HANDLE_ERROR(hipStreamDestroy(StreamManager::MANAGER.cuda_streams[i]));
delete[] StreamManager::MANAGER.cuda_streams;
StreamManager::MANAGER.cuda_streams = NULL;
}
}
void AdvanceStreamCounter(int stride) {
StreamManager::MANAGER.stream_counter += stride;
if (StreamManager::MANAGER.stream_counter >= Globals::config.NUM_THREADS)
StreamManager::MANAGER.stream_counter = 0;
}
int GetCurrentStream()
{
return StreamManager::MANAGER.stream_counter;
}
void lock_process()
{
global_mutex.lock();
}
void unlock_process()
{
global_mutex.unlock();
}
void Global_print_mutex(std::thread::id threadIdx,void* address,const char* func, int mode )
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" instance "<<address<<"::"<<func<<": ";
switch(mode)
{
case 0: cout<< "lock"; break;
case 1: cout<< "unlock"; break;
case 2: cout<< "relock with msg"; break;
case 3: cout<< "un-relock with msg"; break;
}
cout<<endl;
}
}
template<typename T>
void Global_print_node(std::thread::id threadIdx,void* node_address,int depth,float step_reward,
float value,float ub,float uub, float v_loss,float weight,T edge,float WEU, const char* msg)
{
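  // Mutex-guarded debug print of a node's statistics (weight, step reward, lower/upper bounds,
  // value loss); emitted only for fixed scenarios or when node printing is enabled, and only
  // from the configured print thread.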
if(force_print || /*node_address ==NULL ||*/FIX_SCENARIO==1 || DESPOT::Print_nodes)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID==ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1 /*true*/)
{
cout.precision(6);
if(weight!=0)
{
cout<<"thread "<<threadID<<" "<<msg<<" get old node at depth "<<depth
<<": weight="<<weight<<", reward="<<step_reward/weight
<<", lb="<<value/weight<<", ub="<<ub/weight;
if(uub>-1000)
cout <<", uub="<<uub/weight;
cout<<", edge="<< edge ;
if(WEU>-1000)
cout <<", WEU="<<WEU;
cout<<", v_loss="<<v_loss/weight;
}
else
{
cout<<"thread "<<threadID<<" "<<msg<<" get old node at depth "<<depth
<<": weight="<<weight<<", reward="<<step_reward
<<", lb="<<value<<", ub="<<ub;
if(uub>-1000)
cout <<", uub="<<uub;
cout<<", edge="<< edge ;
if(WEU>-1000)
cout <<", WEU="<<WEU;
cout <<", v_loss="<<v_loss;
}
cout<<endl;
}
}
}
template void Global_print_node<int>(std::thread::id threadIdx,void* node_address,
int depth,float step_reward, float value, float ub, float uub, float v_loss,float weight,
int edge, float weu, const char* msg);
template void Global_print_node<uint64_t>(std::thread::id threadIdx,void* node_address,
int depth,float step_reward, float value, float ub, float uub, float v_loss,float weight,
uint64_t edge, float weu, const char* msg);
void Global_print_child(std::thread::id threadIdx,void* node_address,int depth, int v_star)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout<<"thread "<<threadID<<" node "<<node_address<<" at depth "<<depth<<" select optimal child "<<v_star;
cout<<endl;
}
}
}
void Global_print_expand(std::thread::id threadIdx,void* node_address,int depth, int obs)
{
if(force_print || FIX_SCENARIO==1 || DESPOT::Print_nodes)
{
lock_guard<mutex> lck(global_mutex);
int threadID=0;
if(Globals::config.use_multi_thread_)
threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout<<"thread "<<threadID<<" expand node "<<node_address<<" at depth "<<depth<< " edge "<<obs;
cout<<endl;
}
}
}
void Global_print_queue(std::thread::id threadIdx,void* node_address,bool empty_queue)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" "<<node_address<<"::"<<", _queue.empty()="<<empty_queue<<", Active_thread_count= "<<ThreadStatistics::STATISTICS.Active_thread_count<<endl;
}
}
void Global_print_down(std::thread::id threadIdx,void* node_address,int depth)
{
if(force_print)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" trace to node "<<node_address<<" at depth "<<depth;
cout<<endl;
}
}
void Global_print_deleteT(std::thread::id threadIdx,int mode, int the_case)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
switch(mode)
{
case 0:cout<<"Delete expansion thread "<<threadID;break;
case 1:cout<<"Delete printing thread "<<threadID;break;
};
switch(the_case)
{
case 0:cout<<" due to NULL ptr"<<endl;break;
case 1:cout<<" due to time out"<<endl;break;
};
cout<<endl;
}
}
void Global_print_GPUuse(std::thread::id threadIdx)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"call GPU func with expansion thread "<<threadID<<endl;
}
}
void Global_print_message(std::thread::id threadIdx, char* msg)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"Msg from thread "<<threadID<<" "<<msg<<endl;
}
}
void Global_print_value(std::thread::id threadIdx, double value, char* msg)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout.precision(4);
cout<<msg<<" Value from thread "<<threadID<<" "<<value<<endl;
}
}
}
}//namespace globals
} // namespace despot
| 9659b03c9ae9cb9230a25cab56ce33a6b37e48cf.cu | #include <despot/GPUcore/thread_globals.h>
#include <despot/core/globals.h>
#include <map>
#include <ucontext.h>
#include <despot/solver/despot.h>
using namespace std;
using namespace despot;
__device__ __managed__ bool GPUDoPrint=false;
bool CPUDoPrint = false;
int CPUPrintPID = 0;
namespace despot {
namespace Globals {
ThreadParams ThreadParams::PARAMS(0);
ThreadStatistics ThreadStatistics::STATISTICS(0);
StreamManager StreamManager::MANAGER(0);
// Global mutex for the shared HyP-DESPOT tree
mutex global_mutex;
//Thread ID mapping
map<std::thread::id, int > ThreadIdMap;
bool force_print = false;
#ifdef WIN32
#include <windows.h>
#elif _POSIX_C_SOURCE >= 199309L
#include <time.h> // for nanosleep
#else
#include <unistd.h> // for usleep
#endif
void sleep_ms(int milliseconds) // cross-platform sleep function
{
#ifdef WIN32
Sleep(milliseconds);
#elif _POSIX_C_SOURCE >= 199309L
struct timespec ts;
ts.tv_sec = milliseconds / 1000;
ts.tv_nsec = (milliseconds % 1000) * 1000000;
nanosleep(&ts, NULL);
#else
usleep(milliseconds * 1000);
#endif
}
// Parameters for serialized printing in HyP-DESPOT. Typically used for debugging purposes.
ThreadParams::ThreadParams(int dummy){
PrintThreadID=0;
PrintParentEdge=18;
PrintEdge=2;
PrintDepth=1;
PrintAction=244;
}
// Multi-threading statistics
ThreadStatistics::ThreadStatistics(int dummy){
Active_thread_count = 0;
Serial_expansion_time = 0;
}
// CUDA concurrent kernel streams
StreamManager::StreamManager(int dummy){
stream_counter = 0;
cuda_streams = NULL;
Concurrency_threashold=INT_MAX;
}
void ChooseGPUForThread(){
if (Globals::config.useGPU){
int devicesCount;
cudaGetDeviceCount(&devicesCount);
int deviceIndex = Globals::config.GPUid;
cudaSetDevice(deviceIndex);
}
}
void RecordStartTime(){
ThreadStatistics::STATISTICS.start_time = Time::now();
}
void RecordSearchStartTime(){
ThreadStatistics::STATISTICS.search_start_time = Time::now();
}
// Return high-definition time in seconds
double ElapsedTime(){
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ThreadStatistics::STATISTICS.start_time);
return thread_d.count() / 1000000000.0f;
}
double ElapsedSearchTime() {
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ThreadStatistics::STATISTICS.search_start_time);
return thread_d.count() / 1000000000.0f;
}
double ElapsedTime(std::chrono::time_point<std::chrono::system_clock> ts){
ns thread_d = std::chrono::duration_cast < ns > (Time::now() - ts);
double passed_time=thread_d.count() / 1000000000.0f;
return passed_time;
}
void AddExpanded()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Expansion_Count++;
}
void ResetExpanded()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Expansion_Count=0;
}
int CountExpanded()
{
lock_guard<mutex> lck(global_mutex);
return ThreadStatistics::STATISTICS.Expansion_Count;
}
double SpeedUp(double parallel_time){
return ThreadStatistics::STATISTICS.Serial_expansion_time / parallel_time;
}
double Efficiency(double parallel_time){
double speedup = ThreadStatistics::STATISTICS.Serial_expansion_time / parallel_time;
return speedup / Globals::config.NUM_THREADS;
}
void AddSerialTime(float used_time)
{
ThreadStatistics::STATISTICS.Serial_expansion_time += used_time;
}
void ResetSerialTime()
{
ThreadStatistics::STATISTICS.Serial_expansion_time = 0;
}
void AddMappedThread(std::thread::id the_id, int mapped_id)
{
ThreadIdMap[the_id]=mapped_id;
}
int MapThread(std::thread::id the_id)
{
return ThreadIdMap[the_id];
}
void AddActiveThread()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Active_thread_count++;
}
void MinusActiveThread()
{
lock_guard<mutex> lck(global_mutex);
ThreadStatistics::STATISTICS.Active_thread_count--;
}
bool Timeout(float timeout)
{
auto t1 = Time::now();
fsec fs = t1 - ThreadStatistics::STATISTICS.search_start_time;
ns d = std::chrono::duration_cast<ns>(fs);
//if(d.count()/1000000000.0>=timeout)
// cout << "Time out for search " << d.count()/1000000000.0 << " s." << endl;
return d.count()/1000000000.0>=timeout;
}
void SetupCUDAStreams(){
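  // Create one CUDA stream per search thread (when multi-threading is enabled) so kernels
  // issued by different threads can overlap.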
if (Globals::config.use_multi_thread_) {
cout << "setting up CUDA streams for " << Globals::config.NUM_THREADS << " threads" << endl;
StreamManager::MANAGER.cuda_streams = new cudaStream_t[Globals::config.NUM_THREADS];
for (int i = 0; i < Globals::config.NUM_THREADS; i++){
HANDLE_ERROR(cudaStreamCreate(&StreamManager::MANAGER.cuda_streams[i]));
}
} else {
StreamManager::MANAGER.cuda_streams = NULL;
}
}
cudaStream_t& GetThreadCUDAStream(int ThreadID){
return StreamManager::MANAGER.cuda_streams[ThreadID];
}
void DestroyCUDAStreams(){
if (StreamManager::MANAGER.cuda_streams) {
for (int i = 0; i < Globals::config.NUM_THREADS; i++)
HANDLE_ERROR(cudaStreamDestroy(StreamManager::MANAGER.cuda_streams[i]));
delete[] StreamManager::MANAGER.cuda_streams;
StreamManager::MANAGER.cuda_streams = NULL;
}
}
void AdvanceStreamCounter(int stride) {
StreamManager::MANAGER.stream_counter += stride;
if (StreamManager::MANAGER.stream_counter >= Globals::config.NUM_THREADS)
StreamManager::MANAGER.stream_counter = 0;
}
int GetCurrentStream()
{
return StreamManager::MANAGER.stream_counter;
}
void lock_process()
{
global_mutex.lock();
}
void unlock_process()
{
global_mutex.unlock();
}
void Global_print_mutex(std::thread::id threadIdx,void* address,const char* func, int mode )
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" instance "<<address<<"::"<<func<<": ";
switch(mode)
{
case 0: cout<< "lock"; break;
case 1: cout<< "unlock"; break;
case 2: cout<< "relock with msg"; break;
case 3: cout<< "un-relock with msg"; break;
}
cout<<endl;
}
}
template<typename T>
void Global_print_node(std::thread::id threadIdx,void* node_address,int depth,float step_reward,
float value,float ub,float uub, float v_loss,float weight,T edge,float WEU, const char* msg)
{
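  // Mutex-guarded debug print of a node's statistics (weight, step reward, lower/upper bounds,
  // value loss); emitted only for fixed scenarios or when node printing is enabled, and only
  // from the configured print thread.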
if(force_print || /*node_address ==NULL ||*/FIX_SCENARIO==1 || DESPOT::Print_nodes)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID==ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1 /*true*/)
{
cout.precision(6);
if(weight!=0)
{
cout<<"thread "<<threadID<<" "<<msg<<" get old node at depth "<<depth
<<": weight="<<weight<<", reward="<<step_reward/weight
<<", lb="<<value/weight<<", ub="<<ub/weight;
if(uub>-1000)
cout <<", uub="<<uub/weight;
cout<<", edge="<< edge ;
if(WEU>-1000)
cout <<", WEU="<<WEU;
cout<<", v_loss="<<v_loss/weight;
}
else
{
cout<<"thread "<<threadID<<" "<<msg<<" get old node at depth "<<depth
<<": weight="<<weight<<", reward="<<step_reward
<<", lb="<<value<<", ub="<<ub;
if(uub>-1000)
cout <<", uub="<<uub;
cout<<", edge="<< edge ;
if(WEU>-1000)
cout <<", WEU="<<WEU;
cout <<", v_loss="<<v_loss;
}
cout<<endl;
}
}
}
template void Global_print_node<int>(std::thread::id threadIdx,void* node_address,
int depth,float step_reward, float value, float ub, float uub, float v_loss,float weight,
int edge, float weu, const char* msg);
template void Global_print_node<uint64_t>(std::thread::id threadIdx,void* node_address,
int depth,float step_reward, float value, float ub, float uub, float v_loss,float weight,
uint64_t edge, float weu, const char* msg);
void Global_print_child(std::thread::id threadIdx,void* node_address,int depth, int v_star)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout<<"thread "<<threadID<<" node "<<node_address<<" at depth "<<depth<<" select optimal child "<<v_star;
cout<<endl;
}
}
}
void Global_print_expand(std::thread::id threadIdx,void* node_address,int depth, int obs)
{
if(force_print || FIX_SCENARIO==1 || DESPOT::Print_nodes)
{
lock_guard<mutex> lck(global_mutex);
int threadID=0;
if(Globals::config.use_multi_thread_)
threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout<<"thread "<<threadID<<" expand node "<<node_address<<" at depth "<<depth<< " edge "<<obs;
cout<<endl;
}
}
}
void Global_print_queue(std::thread::id threadIdx,void* node_address,bool empty_queue)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" "<<node_address<<"::"<<", _queue.empty()="<<empty_queue<<", Active_thread_count= "<<ThreadStatistics::STATISTICS.Active_thread_count<<endl;
}
}
void Global_print_down(std::thread::id threadIdx,void* node_address,int depth)
{
if(force_print)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"thread "<<threadID<<" trace to node "<<node_address<<" at depth "<<depth;
cout<<endl;
}
}
void Global_print_deleteT(std::thread::id threadIdx,int mode, int the_case)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
switch(mode)
{
case 0:cout<<"Delete expansion thread "<<threadID;break;
case 1:cout<<"Delete printing thread "<<threadID;break;
};
switch(the_case)
{
case 0:cout<<" due to NULL ptr"<<endl;break;
case 1:cout<<" due to time out"<<endl;break;
};
cout<<endl;
}
}
void Global_print_GPUuse(std::thread::id threadIdx)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"call GPU func with expansion thread "<<threadID<<endl;
}
}
void Global_print_message(std::thread::id threadIdx, char* msg)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
cout<<"Msg from thread "<<threadID<<" "<<msg<<endl;
}
}
void Global_print_value(std::thread::id threadIdx, double value, char* msg)
{
if(false)
{
lock_guard<mutex> lck(global_mutex);
int threadID=MapThread(threadIdx);
if(threadID == ThreadParams::PARAMS.PrintThreadID || ThreadParams::PARAMS.PrintThreadID == -1)
{
cout.precision(4);
cout<<msg<<" Value from thread "<<threadID<<" "<<value<<endl;
}
}
}
}//namespace globals
} // namespace despot
|