Sleeme
Sleeme

Reputation: 71

Cuda thrust global memory writing very slow

I am currently writing code that calculates an integral histogram on the GPU using the NVIDIA Thrust library.

Therefore I allocate a continuous Block of device memory which I update with a custom functor all the time.

The problem is that writes to the device memory are very slow, but the reads are actually OK.

The basic setup is the following:

// Question's illustrative pseudo-code: a Thrust functor that accumulates an
// integral histogram directly into raw device memory.  The `...` elide the
// real members/arguments — this is not compilable as shown.
struct HistogramCreation
{
    HistogramCreation(
    ...
    // pointer to memory
    ...
    ){}

    /// The actual summation operator
    // Invoked once per image index by thrust::for_each.  The inner loop does
    // ~30 read-modify-write accesses to global memory per element; the global
    // store at (1) is the write the question reports as slow.
    __device__ void operator()(int index){
       .. do the calculations ..
       for(int j=0;j<30;j++){

       (1)  *_memoryPointer =  values (also using reads to such locations) ;

       }
  }
}

// Question's pseudo-code host wrapper: allocates the device histogram buffer
// and applies the functor to every pixel index via thrust::for_each.
void foo(){

  // NOTE(review): cudaMalloc takes a void** as its first argument — presumably
  // _pointer is already the address of the device pointer and size is in
  // bytes; verify against the real declaration.
  cudaMalloc(_pointer,size);

  HistogramCreation initialCreation( ... _pointer ...);
  // Enumerate indices [0, _imageSize) on the device; one functor call each.
  thrust::for_each(
    thrust::make_counting_iterator(0),
    thrust::make_counting_iterator(_imageSize),
    initialCreation);
}

If I change the write in (1) to the following:

unsigned int val = values;

the performance is much better. This is the only global memory write I have.

Using the global memory write I get about 2 s for HD footage; using the local variable it takes about 50 ms — so about a factor of 40 less.

Why is this so slow? how could I improve it?

Upvotes: 0

Views: 2173

Answers (3)

Eugene
Eugene

Reputation: 9474

Note that NVCC might optimize out a lot of your code after you make the modification — it detects that no write to global memory is made and just removes the "unneeded" code. So this speedup may not be coming from the global write per se.

I would recommend using profiler on your actual code (the one with global write) to see if there's anything like unaligned access or other perf problem.

Upvotes: 1

ardiyu07
ardiyu07

Reputation: 1800

Just as @OlegTitov said, frequent load/store with global memory should be avoided as much as possible. When there's a situation where it's inevitable, then coalesced memory access can help the execution process not to get too slow; however in most cases, histogram calculation is pretty tough to realize the coalesced access.

While most of the above is basically just restating @OlegTitov's answer, I'd just like to share an investigation I did about finding summation with NVIDIA CUDA. Actually the result is pretty interesting and I hope it'll be helpful information for other CUDA developers.

The experiment was basically to run a speed test of finding summation with various memory access patterns: using global memory (1 thread), L2 cache (atomic ops - 128 threads), and L1 cache (shared mem - 128 threads)

This experiment used: Kepler GTX 680, 1536 cores @ 1.06GHz GDDR5 256-bit @ 3GHz

Here are the kernels:

__global__
void glob(float *h) {
    // Single-thread baseline (launched <<<1,1>>>): one thread serially
    // accumulates pseudo-random values straight into the global-memory
    // histogram h.
    uint seed = SEEDRND;
    uint draw = 0u;   // assigned on the first iteration before any use
    for (int iter = 0; iter < NUMLOOP; ++iter) {
        // Draw a fresh random number only once every NTHREADS iterations so
        // the total number of rnd() calls matches the 128-thread kernels.
        if (iter % NTHREADS == 0) draw = rnd(seed);
        int bin = draw % NBIN;
        h[bin] += (float)(draw % 10) * 1.0f;
    }
}

__global__
void atom(float *h) {
    // Each of the NTHREADS threads walks a strided slice of the iteration
    // space and publishes its contribution with an atomic add (serviced in
    // L2 for global-memory addresses).
    uint seed = SEEDRND;
    for (int iter = threadIdx.x; iter < NUMLOOP; iter += NTHREADS) {
        uint draw = rnd(seed);
        int bin = draw % NBIN;
        atomicAdd(&h[bin], (float)(draw % 10) * 1.0f);
    }
}

__global__
void shm(float *h) {
    // Shared-memory histogram: each thread accumulates into a private row of
    // bins in shared memory, then the NTHREADS rows are tree-reduced into
    // row 0 and copied back to global memory.
    // Precondition: blockDim.x == NTHREADS == 128 — the reduction ladder
    // below is hard-wired for exactly 128 threads.
    int lid = threadIdx.x;
    uint sd = SEEDRND;

    __shared__ float shm[NTHREADS][NBIN];
    // Seed every private row with the current global bin values.  h must be
    // zero-initialized, otherwise its contents are counted NTHREADS times.
    for (int i = 0; i < NBIN; i++) shm[lid][i] = h[i];

    // Each thread accumulates only into its own row — no cross-thread
    // traffic, hence no synchronization needed in this phase.
    for (int i = lid; i < NUMLOOP; i+=NTHREADS) {
        uint random = rnd(sd);
        int rind = random % NBIN;
        float randval = (float)(random % 10)*1.0f ;
        shm[lid][rind] += randval;
    }

    /* tree reduction: fold 128 rows into row 0, one bin at a time */
    for (int i = 0; i < NBIN; i++) {
        __syncthreads();
        if (threadIdx.x < 64) {
            shm[threadIdx.x][i] += shm[threadIdx.x+64][i];
        }
        __syncthreads();
        if (threadIdx.x < 32) {
            shm[threadIdx.x][i] += shm[threadIdx.x+32][i];
        }
        __syncthreads();
        if (threadIdx.x < 16) {
            shm[threadIdx.x][i] += shm[threadIdx.x+16][i];
        }
        __syncthreads();
        if (threadIdx.x < 8) {
            shm[threadIdx.x][i] += shm[threadIdx.x+8][i];
        }
        __syncthreads();
        if (threadIdx.x < 4) {
            shm[threadIdx.x][i] += shm[threadIdx.x+4][i];
        }
        __syncthreads();
        if (threadIdx.x < 2) {
            shm[threadIdx.x][i] += shm[threadIdx.x+2][i];
        }
        __syncthreads();
        if (threadIdx.x == 0) {
            shm[0][i] += shm[1][i];
        }
    }

    // BUG FIX: barrier before the copy-out.  Without it, threads other than 0
    // can read shm[0][NBIN-1] below before thread 0 has finished the last
    // `shm[0][i] += shm[1][i]` addition of the loop above — a data race.
    __syncthreads();

    for (int i = 0; i < NBIN; i++) h[i] = shm[0][i];
}

OUTPUT

atom:  102656.00 shm:  102656.00 glob:  102656.00
atom:  122240.00 shm:  122240.00 glob:  122240.00
... blah blah blah ...

  One Thread: 126.3919 msec
      Atomic:   7.5459 msec
      Sh_mem:   2.2207 msec

The ratio between these kernels is about 57 : 3.4 : 1 (glob : atom : shm). Many things can be analyzed here, and it truly does not mean that using L1 or L2 memory spaces will always give you more than 10 times speedup of the whole program.

And here's the main and other funcs:

#include <iostream>
#include <cstdlib>
#include <cstdio>
using namespace std;

#define NUMLOOP 1000000
#define NBIN 36
#define SEEDRND 1

#define NTHREADS 128
#define NBLOCKS 1

// Park–Miller minimal-standard generator: seed = (seed * 16807) mod (2^31-1).
// Updates `seed` in place and returns the new value.
__device__ uint rnd(uint & seed) {
#if LONG_MAX > (16807*2147483647)
    // 64-bit `long`: do the multiply in 64-bit arithmetic.
    int const a    = 16807;
    int const m    = 2147483647;
    // BUG FIX: the original wrote `long(seed * a)` — the product was computed
    // in 32-bit uint and wrapped mod 2^32 *before* the widening cast,
    // corrupting the sequence (and disagreeing with the #else branch, which
    // computes the product exactly via doubles).  Widen first.
    seed = (long(seed) * a) % m;
    return seed;
#else
    // 32-bit `long`: emulate the 64-bit product with doubles — exact, since
    // 16807 * (2^31 - 1) fits in a double's 53-bit mantissa.
    double const a    = 16807;
    double const m    = 2147483647;

    double temp = seed * a;
    seed = (int) (temp - m * floor(temp/m));
    return seed;
#endif
}

... the above kernels ...

// Benchmark driver: times the three summation kernels (atomics, shared
// memory, single-thread global) on identically zeroed NBIN-bin histograms
// and prints per-bin results plus the elapsed time of each variant.
int main()
{
    // d_hist -> atom, d_hist2 -> shm, d_hist3 -> glob.
    float *h_hist, *h_hist2, *h_hist3, *d_hist, *d_hist2, *d_hist3;
    h_hist  = (float*)malloc(NBIN * sizeof(float));
    h_hist2 = (float*)malloc(NBIN * sizeof(float));
    h_hist3 = (float*)malloc(NBIN * sizeof(float));
    cudaMalloc((void**)&d_hist, NBIN * sizeof(float));
    cudaMalloc((void**)&d_hist2, NBIN * sizeof(float));
    cudaMalloc((void**)&d_hist3, NBIN * sizeof(float));

    // Zero all three device histograms from the same zeroed host buffer.
    for (int i = 0; i < NBIN; i++) h_hist[i] = 0.0f;
    cudaMemcpy(d_hist, h_hist, NBIN * sizeof(float),
    cudaMemcpyHostToDevice);
    cudaMemcpy(d_hist2, h_hist, NBIN * sizeof(float),
    cudaMemcpyHostToDevice);
    cudaMemcpy(d_hist3, h_hist, NBIN * sizeof(float),
    cudaMemcpyHostToDevice);

    cudaEvent_t start, end;
    // BUG FIX: elapsed3 was left uninitialized while its siblings were zeroed.
    float elapsed = 0, elapsed2 = 0, elapsed3 = 0;
    cudaEventCreate(&start);
    cudaEventCreate(&end);

    /* time the atomic (L2) kernel */
    cudaEventRecord(start, 0);
    atom<<<NBLOCKS, NTHREADS>>>(d_hist);
    cudaDeviceSynchronize();   // cudaThreadSynchronize() is deprecated
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end); // end is recorded after start; one wait suffices
    cudaEventElapsedTime(&elapsed, start, end);

    /* time the shared-memory (L1) kernel */
    cudaEventRecord(start, 0);
    shm<<<NBLOCKS, NTHREADS>>>(d_hist2);
    cudaDeviceSynchronize();
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsed2, start, end);

    /* time the single-thread global-memory kernel */
    cudaEventRecord(start, 0);
    glob<<<1, 1>>>(d_hist3);
    cudaDeviceSynchronize();
    cudaEventRecord(end, 0);
    cudaEventSynchronize(end);
    cudaEventElapsedTime(&elapsed3, start, end);

    cudaMemcpy(h_hist, d_hist, NBIN * sizeof(float),
    cudaMemcpyDeviceToHost);
    cudaMemcpy(h_hist2, d_hist2, NBIN * sizeof(float),
    cudaMemcpyDeviceToHost);
    cudaMemcpy(h_hist3, d_hist3, NBIN * sizeof(float),
    cudaMemcpyDeviceToHost);

    /* print output */
    // BUG FIX: the original format string was split across source lines
    // (invalid C) and its newline escapes were mojibaked as Yen signs (¥n).
    for (int i = 0; i < NBIN; i++) {
        printf("atom: %10.2f shm: %10.2f glob: %10.2f\n",
               h_hist[i], h_hist2[i], h_hist3[i]);
    }

    printf("%12s: %8.4f msec\n", "One Thread", elapsed3);
    printf("%12s: %8.4f msec\n", "Atomic", elapsed);
    printf("%12s: %8.4f msec\n", "Sh_mem", elapsed2);

    // Release all resources (the original leaked every allocation and event).
    cudaEventDestroy(start);
    cudaEventDestroy(end);
    cudaFree(d_hist);
    cudaFree(d_hist2);
    cudaFree(d_hist3);
    free(h_hist);
    free(h_hist2);
    free(h_hist3);

    return 0;
}

Upvotes: 4

Oleg Titov
Oleg Titov

Reputation: 1140

When writing GPU code you should avoid reading and writing to/from global memory. Global memory is very slow on the GPU. That's a hardware feature. The only thing you can do is to make neighboring threads read/write to neighboring addresses in global memory. This will cause coalescing and speed up the process. But in general: read your data once, process it, and write it out once.

Upvotes: 1

Related Questions