Comparing the Performance of the Rectangular Shared Memory Kernels with grid (1,1) block (16,16)
To compare the performance of Rectangular Shared Memory Kernels with a grid (1,1) and a block (16,16) for Matrix Transposition.
Step 1: Define constants for block dimensions, padding, and shared memory configurations.
Step 2: Implement a function to print data.
Step 3: Implement multiple kernel functions for performing matrix transposition using shared memory.
Step 4: Set up CUDA device, configure grid and block dimensions, allocate device memory, perform matrix transposition using each kernel function, copy results to host, optionally print the data, and free the allocated memory.
Step 5: Reset the CUDA device.
Step 6: Terminate the program.
DEVELOPED BY : Gowri M
REGISTER NO : 212220230019
#include "common.h"
#include <cuda_runtime.h>
#include <stdio.h>
#define BDIMX 16
#define BDIMY 16
#define IPAD 2
/*
 * Print a labelled run of integers on a single line.
 *
 * msg  : label printed before the values (read-only; now const-correct,
 *        since every caller passes a string literal)
 * in   : host array holding at least `size` ints
 * size : number of elements to print
 */
void printData(const char *msg, int *in, const int size)
{
    printf("%s: ", msg);

    for (int i = 0; i < size; i++)
    {
        printf("%4d", in[i]);
    }

    printf("\n\n");
    // flush once after the whole line instead of once per element
    fflush(stdout);
}
// Baseline kernel: store AND load the shared tile row-wise.
// Row accesses put consecutive lanes in consecutive banks, so neither
// direction serializes. Expects one (BDIMX, BDIMY) block; `out` must
// hold BDIMX*BDIMY ints.
__global__ void setRowReadRow(int *out)
{
    // statically sized shared-memory tile
    __shared__ int tile[BDIMY][BDIMX];

    // flatten the 2D thread index into a linear output index
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // row-wise store, barrier, row-wise load
    tile[threadIdx.y][threadIdx.x] = tid;
    __syncthreads();
    out[tid] = tile[threadIdx.y][threadIdx.x];
}
// Worst case: store AND load the shared tile column-wise.
// A warp's lanes then stride through shared memory by a full row,
// which serializes the accesses through the banks. Expects one
// (BDIMX, BDIMY) block; `out` must hold BDIMX*BDIMY ints.
__global__ void setColReadCol(int *out)
{
    // tile declared with swapped extents so [x][y] indexing is in bounds
    __shared__ int tile[BDIMX][BDIMY];

    // flatten the 2D thread index into a linear output index
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // column-wise store, barrier, column-wise load
    tile[threadIdx.x][threadIdx.y] = tid;
    __syncthreads();
    out[tid] = tile[threadIdx.x][threadIdx.y];
}
// Column-wise store and load, but with coordinates derived from the
// linear thread index (the rectangular-tile formulation). Expects one
// (BDIMX, BDIMY) block; `out` must hold BDIMX*BDIMY ints.
__global__ void setColReadCol2(int *out)
{
    // statically sized shared-memory tile
    __shared__ int tile[BDIMY][BDIMX];

    // linear thread index within the block
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // recover transposed (row, col) coordinates from the linear index
    const unsigned int row = tid / blockDim.y;
    const unsigned int col = tid % blockDim.y;

    // store by column, barrier, load from the same column location
    tile[col][row] = tid;
    __syncthreads();
    out[tid] = tile[col][row];
}
// Transpose kernel: store the tile row-wise (conflict-free) and read it
// back column-wise (transposed). Expects one (BDIMX, BDIMY) block;
// `out` must hold BDIMX*BDIMY ints.
__global__ void setRowReadCol(int *out)
{
    // statically sized shared-memory tile
    __shared__ int tile[BDIMY][BDIMX];

    // linear thread index within the block
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // transposed (row, col) coordinates for the read-back
    const unsigned int row = tid / blockDim.y;
    const unsigned int col = tid % blockDim.y;

    // row-wise store, barrier, column-wise (transposed) load
    tile[threadIdx.y][threadIdx.x] = tid;
    __syncthreads();
    out[tid] = tile[col][row];
}
// Same row-store / column-load transpose as setRowReadCol, but the tile's
// inner dimension is padded by IPAD so column accesses land in different
// banks. Expects one (BDIMX, BDIMY) block; `out` must hold BDIMX*BDIMY ints.
__global__ void setRowReadColPad(int *out)
{
    // padded static tile: +IPAD columns shift each row across the banks
    __shared__ int tile[BDIMY][BDIMX + IPAD];

    // linear thread index within the block
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // transposed (row, col) coordinates for the read-back
    const unsigned int row = tid / blockDim.y;
    const unsigned int col = tid % blockDim.y;

    // row-wise store, barrier, column-wise (transposed) load
    tile[threadIdx.y][threadIdx.x] = tid;
    __syncthreads();
    out[tid] = tile[col][row];
}
// Dynamic shared-memory variant of the row-store / column-load transpose.
// Launch with BDIMX*BDIMY*sizeof(int) bytes of dynamic shared memory and
// one (BDIMX, BDIMY) block; `out` must hold BDIMX*BDIMY ints.
__global__ void setRowReadColDyn(int *out)
{
    // dynamically sized 1D shared-memory tile, supplied at launch time
    extern __shared__ int tile[];

    // linear thread index within the block
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // transposed (row, col) coordinates for the read-back
    const unsigned int row = tid / blockDim.y;
    const unsigned int col = tid % blockDim.y;

    // linearize the transposed coordinates back into the 1D tile
    const unsigned int trans = col * blockDim.x + row;

    // row-wise store, barrier, transposed load
    tile[tid] = tid;
    __syncthreads();
    out[tid] = tile[trans];
}
// Dynamic shared-memory transpose with padding: each logical row of the
// 1D tile is (blockDim.x + IPAD) wide so the column-wise read-back does
// not serialize through the banks. Launch with
// (BDIMX + IPAD)*BDIMY*sizeof(int) bytes of dynamic shared memory and one
// (BDIMX, BDIMY) block; `out` must hold BDIMX*BDIMY ints.
__global__ void setRowReadColDynPad(int *out)
{
    // dynamically sized 1D shared-memory tile, supplied at launch time
    extern __shared__ int tile[];

    // linear thread index into the (unpadded) global output
    const unsigned int tid = threadIdx.y * blockDim.x + threadIdx.x;

    // transposed (row, col) coordinates for the read-back
    const unsigned int row = tid / blockDim.y;
    const unsigned int col = tid % blockDim.y;

    // padded 1D offsets: one for the row-wise store, one for the
    // transposed load
    const unsigned int store_idx = threadIdx.y * (blockDim.x + IPAD) + threadIdx.x;
    const unsigned int load_idx  = col * (blockDim.x + IPAD) + row;

    // row-wise store into the padded tile, barrier, transposed load
    tile[store_idx] = tid;
    __syncthreads();
    out[tid] = tile[load_idx];
}
/*
 * Entry point: runs each shared-memory transpose kernel once with a
 * single (BDIMX, BDIMY) block, copies the result back to the host, and
 * optionally prints it (pass any non-zero first argument to enable
 * printing). Returns EXIT_SUCCESS, or EXIT_FAILURE if the host buffer
 * cannot be allocated.
 */
int main(int argc, char ** argv)
{
    // set up device
    int dev = 0;
    cudaDeviceProp deviceProp;
    CHECK(cudaGetDeviceProperties(&deviceProp, dev));
    printf("%s at ", argv[0]);
    printf("device %d: %s ", dev, deviceProp.name);
    CHECK(cudaSetDevice(dev));

    // report the shared-memory bank width; use the enum constant rather
    // than the magic number 1 for cudaSharedMemBankSizeFourByte
    cudaSharedMemConfig pConfig;
    CHECK(cudaDeviceGetSharedMemConfig(&pConfig));
    printf("with Bank Mode:%s ",
           pConfig == cudaSharedMemBankSizeFourByte ? "4-Byte" : "8-Byte");

    // set up array size (one tile)
    int nx = BDIMX;
    int ny = BDIMY;

    // any non-zero command-line argument enables result printing
    bool iprintf = 0;
    if (argc > 1) iprintf = atoi(argv[1]);

    size_t nBytes = nx * ny * sizeof(int);

    // execution configuration: one block covering the whole tile
    dim3 block (BDIMX, BDIMY);
    dim3 grid  (1, 1);
    printf("<<< grid (%d,%d) block (%d,%d)>>>\n", grid.x, grid.y, block.x,
            block.y);

    // allocate device output buffer and a host buffer for results
    int *d_C;
    CHECK(cudaMalloc((int**)&d_C, nBytes));
    int *gpuRef = (int *)malloc(nBytes);
    if (gpuRef == NULL)
    {
        fprintf(stderr, "host malloc of %zu bytes failed\n", nBytes);
        CHECK(cudaFree(d_C));
        return EXIT_FAILURE;
    }

    // each kernel run: clear the buffer, launch, check the launch for
    // configuration errors, copy the result back, optionally print it
    CHECK(cudaMemset(d_C, 0, nBytes));
    setRowReadRow<<<grid, block>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setRowReadRow ", gpuRef, nx * ny);

    CHECK(cudaMemset(d_C, 0, nBytes));
    setColReadCol<<<grid, block>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setColReadCol ", gpuRef, nx * ny);

    CHECK(cudaMemset(d_C, 0, nBytes));
    setColReadCol2<<<grid, block>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setColReadCol2 ", gpuRef, nx * ny);

    CHECK(cudaMemset(d_C, 0, nBytes));
    setRowReadCol<<<grid, block>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setRowReadCol ", gpuRef, nx * ny);

    // dynamic shared-memory variant: pass the tile size at launch
    CHECK(cudaMemset(d_C, 0, nBytes));
    setRowReadColDyn<<<grid, block, BDIMX*BDIMY*sizeof(int)>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setRowReadColDyn ", gpuRef, nx * ny);

    CHECK(cudaMemset(d_C, 0, nBytes));
    setRowReadColPad<<<grid, block>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setRowReadColPad ", gpuRef, nx * ny);

    // padded dynamic variant needs IPAD extra columns per row
    CHECK(cudaMemset(d_C, 0, nBytes));
    setRowReadColDynPad<<<grid, block, (BDIMX + IPAD)*BDIMY*sizeof(int)>>>(d_C);
    CHECK(cudaGetLastError());
    CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
    if(iprintf) printData("setRowReadColDynPad ", gpuRef, nx * ny);

    // free host and device memory
    CHECK(cudaFree(d_C));
    free(gpuRef);

    // reset device
    CHECK(cudaDeviceReset());
    return EXIT_SUCCESS;
}
Thus, the program to compare the performance of rectangular shared memory kernels with a grid (1,1) and a block (16,16) for matrix transposition has been executed successfully.