voidmaster

Reputation: 3

With the new Maxwell architecture, do I have to use shared memory?

A lot of CUDA samples show that you have to put data from global memory into shared memory before using it. For example, consider a function that sums the values in 5x5 squares. The profiler shows that the version with no shared memory runs about 20% faster. Do I have to put my data into shared memory, or will Maxwell put the data into the L1 cache automatically?
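For reference, here is a minimal sketch of the kind of kernel the question describes: a straightforward 5x5 box sum with no shared memory. The names, the float data type, and the boundary handling are assumptions for illustration, not from the original question:

__global__ void boxsum5(const float *in, float *out, int w, int h)
{
    // each thread sums the 5x5 window whose top-left corner is (x, y);
    // threads whose window would run off the grid simply do nothing
    int x = blockIdx.x*blockDim.x + threadIdx.x;
    int y = blockIdx.y*blockDim.y + threadIdx.y;
    if ((x + 5 <= w) && (y + 5 <= h)) {
        float sum = 0.0f;
        for (int i = 0; i < 5; i++)
            for (int j = 0; j < 5; j++)
                sum += in[(y+i)*w + (x+j)];
        out[y*w + x] = sum;
    }
}

Every thread here reads 25 values straight from global memory, and neighboring threads read mostly the same values; that overlap is the reuse pattern the shared memory question is really about.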

Upvotes: 0

Views: 89

Answers (1)

Robert Crovella

Reputation: 151849

Shared memory is still a useful optimization for many codes, even on Maxwell.

If you have a 2D stencil code (which appears to be what you are describing), I would certainly expect the shared memory version to run faster, assuming the shared memory adaptation/usage is done correctly. The reason is data reuse: in a stencil, each input element is read by many neighboring threads, and staging a tile of the input in shared memory converts those repeated global-memory reads into fast on-chip reads, rather than depending on the cache to keep the data resident.

Here's a fully worked example of a 2D stencil code, in both shared-memory and non-shared-memory versions, running on a GTX 960. The shared memory version completes its compute phase in about 30% less time (1.52 s vs. 2.16 s in the test output below):

non-shared memory version:

$ cat example3a_imp.cu
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
// these are just for timing measurements
#include <time.h>
// Code that reads values from a 2D grid and, for each node in the grid, finds the minimum
// value among all values stored in cells sharing that node, and stores the minimum
// value in that node.


//define the window size (square window) and the data set size
#define WSIZE 16
#define DATAHSIZE 8000
#define DATAWSIZE 16000
#define CHECK_VAL 1
#define MIN(X,Y) ((X<Y)?X:Y)
#define BLKWSIZE 32
#define BLKHSIZE 32

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

typedef int oArray[DATAHSIZE];
typedef int iArray[DATAHSIZE+WSIZE];

__global__ void cmp_win(oArray *output, const iArray *input)
{
    int tempout, i, j;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int idy = blockIdx.y*blockDim.y + threadIdx.y;
    if ((idx < DATAHSIZE) && (idy < DATAWSIZE)){
      tempout = output[idy][idx];
      // scan the WSIZE x WSIZE window, reading every element directly from global memory
#pragma unroll
      for (i=0; i<WSIZE; i++)
#pragma unroll
        for (j=0; j<WSIZE; j++)
          if (input[idy + i][idx + j] < tempout)
            tempout = input[idy + i][idx + j];
      output[idy][idx] = tempout;
      }
}

int main(int argc, char *argv[])
{
    int i, j;
    const dim3 blockSize(BLKHSIZE, BLKWSIZE, 1);
    const dim3 gridSize(((DATAHSIZE+BLKHSIZE-1)/BLKHSIZE), ((DATAWSIZE+BLKWSIZE-1)/BLKWSIZE), 1);
// these are just for timing
    clock_t t0, t1, t2;
    double t1sum=0.0;
    double t2sum=0.0;
// overall data set sizes
    const int nr = DATAHSIZE;
    const int nc = DATAWSIZE;
// window dimensions
    const int wr = WSIZE;
    const int wc = WSIZE;
// pointers for data set storage via malloc
    iArray *h_in, *d_in;
    oArray *h_out, *d_out;
// start timing
    t0 = clock();
// allocate storage for data set
    if ((h_in = (iArray *)malloc(((nr+wr)*(nc+wc))*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1);}
    if ((h_out = (oArray *)malloc((nr*nc)*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1); }
// synthesize data
    printf("Begin init\n");
    memset(h_in, 0x7F, (nr+wr)*(nc+wc)*sizeof(int));
    memset(h_out, 0x7F, (nr*nc)*sizeof(int));
    for (i=0; i<nc+wc; i+=wc)
      for (j=0; j< nr+wr; j+=wr)
        h_in[i][j] = CHECK_VAL;
    t1 = clock();
    t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
    printf("Init took %f seconds.  Begin compute\n", t1sum);
// allocate GPU device buffers
    cudaMalloc((void **) &d_in, (((nr+wr)*(nc+wc))*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer");
    cudaMalloc((void **) &d_out, ((nr*nc)*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer2");
// copy data to GPU
    cudaMemcpy(d_out, h_out, ((nr*nc)*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy failure");
    cudaMemcpy(d_in, h_in, (((nr+wr)*(nc+wc))*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy2 failure");

    cmp_win<<<gridSize,blockSize>>>(d_out, d_in);
    cudaCheckErrors("Kernel launch failure");
// copy output data back to host

    cudaMemcpy(h_out, d_out, ((nr*nc)*sizeof(int)), cudaMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy3 failure");
    t2 = clock();
    t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
    printf ("Done. Compute took %f seconds\n", t2sum);
    for (i=0; i < nc; i++)
      for (j=0; j < nr; j++)
        if (h_out[i][j] != CHECK_VAL) {printf("mismatch at %d,%d, was: %d should be: %d\n", i,j,h_out[i][j], CHECK_VAL); return 1;}
    printf("Results pass\n");

    return 0;
}

shared memory version:

$ cat example3b_imp.cu
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// these are just for timing measurements
#include <time.h>
// Code that reads values from a 2D grid and, for each node in the grid, finds the minimum
// value among all values stored in cells sharing that node, and stores the minimum
// value in that node.


//define the window size (square window) and the data set size
#define WSIZE 16
#define DATAHSIZE 8000
#define DATAWSIZE 16000
#define CHECK_VAL 1
#define MIN(X,Y) ((X<Y)?X:Y)
#define BLKWSIZE 32
#define BLKHSIZE 32

#define cudaCheckErrors(msg) \
    do { \
        cudaError_t __err = cudaGetLastError(); \
        if (__err != cudaSuccess) { \
            fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
                msg, cudaGetErrorString(__err), \
                __FILE__, __LINE__); \
            fprintf(stderr, "*** FAILED - ABORTING\n"); \
            exit(1); \
        } \
    } while (0)

typedef int oArray[DATAHSIZE];
typedef int iArray[DATAHSIZE+WSIZE];

__global__ void cmp_win(oArray *output, const iArray *input)
{
    __shared__ int smem[(BLKHSIZE + (WSIZE-1))][(BLKWSIZE + (WSIZE-1))];
    int tempout, i, j;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int idy = blockIdx.y*blockDim.y + threadIdx.y;
    if ((idx < DATAHSIZE) && (idy < DATAWSIZE)){
      // each thread loads its own element of the tile
      smem[threadIdx.y][threadIdx.x]=input[idy][idx];
      // threads near the right/bottom edges of the block also load the extra
      // halo cells needed by the WSIZE x WSIZE windows of the interior threads
      if (threadIdx.y > (BLKWSIZE - WSIZE))
        smem[threadIdx.y + (WSIZE-1)][threadIdx.x] = input[idy+(WSIZE-1)][idx];
      if (threadIdx.x > (BLKHSIZE - WSIZE))
        smem[threadIdx.y][threadIdx.x + (WSIZE-1)] = input[idy][idx+(WSIZE-1)];
      if ((threadIdx.x > (BLKHSIZE - WSIZE)) && (threadIdx.y > (BLKWSIZE - WSIZE)))
        smem[threadIdx.y + (WSIZE-1)][threadIdx.x + (WSIZE-1)] = input[idy+(WSIZE-1)][idx+(WSIZE-1)];
      // wait until the whole tile (including halo) is populated before reading it
      __syncthreads();
      // scan the window out of shared memory instead of global memory
      tempout = output[idy][idx];
      for (i=0; i<WSIZE; i++)
        for (j=0; j<WSIZE; j++)
          if (smem[threadIdx.y + i][threadIdx.x + j] < tempout)
            tempout = smem[threadIdx.y + i][threadIdx.x + j];
      output[idy][idx] = tempout;
      }
}

int main(int argc, char *argv[])
{
    int i, j;
    const dim3 blockSize(BLKHSIZE, BLKWSIZE, 1);
    const dim3 gridSize(((DATAHSIZE+BLKHSIZE-1)/BLKHSIZE), ((DATAWSIZE+BLKWSIZE-1)/BLKWSIZE), 1);
// these are just for timing
    clock_t t0, t1, t2;
    double t1sum=0.0;
    double t2sum=0.0;
// overall data set sizes
    const int nr = DATAHSIZE;
    const int nc = DATAWSIZE;
// window dimensions
    const int wr = WSIZE;
    const int wc = WSIZE;
// pointers for data set storage via malloc
    iArray *h_in, *d_in;
    oArray *h_out, *d_out;
// start timing
    t0 = clock();
// allocate storage for data set
    if ((h_in = (iArray *)malloc(((nr+wr)*(nc+wc))*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1);}
    if ((h_out = (oArray *)malloc((nr*nc)*sizeof(int))) == 0) {printf("malloc Fail \n"); exit(1); }
// synthesize data
    printf("Begin init\n");
    memset(h_in, 0x7F, (nr+wr)*(nc+wc)*sizeof(int));
    memset(h_out, 0x7F, (nr*nc)*sizeof(int));
    for (i=0; i<nc+wc; i+=wc)
      for (j=0; j< nr+wr; j+=wr)
        h_in[i][j] = CHECK_VAL;
    t1 = clock();
    t1sum = ((double)(t1-t0))/CLOCKS_PER_SEC;
    printf("Init took %f seconds.  Begin compute\n", t1sum);
// allocate GPU device buffers
    cudaMalloc((void **) &d_in, (((nr+wr)*(nc+wc))*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer");
    cudaMalloc((void **) &d_out, ((nr*nc)*sizeof(int)));
    cudaCheckErrors("Failed to allocate device buffer2");
// copy data to GPU
    cudaMemcpy(d_out, h_out, ((nr*nc)*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy failure");
    cudaMemcpy(d_in, h_in, (((nr+wr)*(nc+wc))*sizeof(int)), cudaMemcpyHostToDevice);
    cudaCheckErrors("CUDA memcpy2 failure");

    cmp_win<<<gridSize,blockSize>>>(d_out, d_in);
    cudaCheckErrors("Kernel launch failure");
// copy output data back to host

    cudaMemcpy(h_out, d_out, ((nr*nc)*sizeof(int)), cudaMemcpyDeviceToHost);
    cudaCheckErrors("CUDA memcpy3 failure");
    t2 = clock();
    t2sum = ((double)(t2-t1))/CLOCKS_PER_SEC;
    printf ("Done. Compute took %f seconds\n", t2sum);
    for (i=0; i < nc; i++)
      for (j=0; j < nr; j++)
        if (h_out[i][j] != CHECK_VAL) {printf("mismatch at %d,%d, was: %d should be: %d\n", i,j,h_out[i][j], CHECK_VAL); return 1;}
    printf("Results pass\n");

    return 0;
}

test:

$ nvcc -O3 -arch=sm_52 example3a_imp.cu -o ex3
$ nvcc -O3 -arch=sm_52 example3b_imp.cu -o ex3_shared
$ ./ex3
Begin init
Init took 0.986819 seconds.  Begin compute
Done. Compute took 2.162276 seconds
Results pass
$ ./ex3_shared
Begin init
Init took 0.987281 seconds.  Begin compute
Done. Compute took 1.522475 seconds
Results pass
$
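Note that the clock()-based timing above includes the cudaMemcpy transfers as well as the kernel. To compare just the two kernels, one option (assuming a CUDA toolkit of this era, which ships nvprof) is to run each binary under the profiler; output omitted here:

$ nvprof ./ex3
$ nvprof ./ex3_shared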

Upvotes: 2
