Manolete

Reputation: 3517

CUDA threads for inner loop

I've got this kernel

__global__ void kernel1(int keep, int include, int width, int* d_Xco, 
              int* d_Xnum, bool* d_Xvalid, float* d_Xblas)
{

  int i, k;  
  i = threadIdx.x + blockIdx.x * blockDim.x;   // one thread per row i

  if(i < keep){

    for(k = 0; k < include ; k++){

      int val = (d_Xblas[i*include + k] >= 1e5);  // 1 if this entry is flagged
      int aux = d_Xnum[i];                        // current slot in row i of d_Xco

      d_Xblas[i*include + k] *= (!val);           // zero out flagged entries
      d_Xco[i*width + aux] = k;                   // record k at the current slot
      d_Xnum[i] += val;                           // advance the slot only on a hit
      d_Xvalid[i*include + k] = (!val);
    }
  }
}

launched with

int keep = 9000;
int include = 23000;
int width = 0.2*include;

int threads = 192;
int blocks = (keep + threads - 1)/threads;   // round up so every i < keep gets a thread
kernel1 <<< blocks,threads  >>>( keep, include, width,
                                 d_Xco, d_Xnum, d_Xvalid, d_Xblas );

This kernel1 works fine, but it is obviously not fully optimized. I thought it would be straightforward to eliminate the inner k loop, but for some reason it doesn't work correctly. My first idea was:

__global__ void kernel2(int keep, int include, int width, 
               int* d_Xco, int* d_Xnum, bool* d_Xvalid, 
               float* d_Xblas)
{

  int i, k;  
  i = threadIdx.x + blockIdx.x * blockDim.x;   // row
  k = threadIdx.y + blockIdx.y * blockDim.y;   // column

  if((i < keep)  && (k < include) ) {

      int val = (d_Xblas[i*include + k] >= 1e5);
      int aux = d_Xnum[i];                       // slot for this row, read by every k thread
      d_Xblas[i*include + k] *= (float)(!val);
      d_Xco[i*width + aux] = k;                  // store this column index at that slot
      atomicAdd(&d_Xnum[i], val);                // bump the count atomically across k threads
      d_Xvalid[i*include + k] = (!val);
  }
}

launched with a 2D grid:

int keep = 9000;
int include = 23000;
int width = 0.2*include;

int th = 32;
dim3 threads(th,th);   // 32 x 32 = 1024 threads per block
dim3 blocks ((keep+threads.x-1)/threads.x, (include+threads.y-1)/threads.y);
kernel2 <<< blocks,threads >>>( keep, include, width, d_Xco, d_Xnum, 
                               d_Xvalid, d_Xblas );

Although I believe the idea is sound, it does not work, and I am running out of ideas here. Could you please help me out? I also think the problem could be in d_Xco, which stores the positions k in a smaller array, packed at the beginning of each row, so the order matters (see the serial sketch after the diagram below):

d_Xco
-------------------------------
| 2|3 |15 |4 |5 |5 | | | | | | .......
-------------------------------
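
To make that concrete, here is a serial, host-side sketch of the per-row effect I am after. This is illustrative only: reference_row is a name I made up, and it assumes d_Xnum starts at zero and that at most width entries qualify per row.

// Serial reference for one row: pack the qualifying k's, in increasing
// order, at the start of row i of Xco.
void reference_row(float* Xblas, int* Xco, int* Xnum, bool* Xvalid,
                   int i, int include, int width)
{
    int count = 0;
    for (int k = 0; k < include; k++) {
        bool hit = (Xblas[i * include + k] >= 1e5f);
        if (hit) {
            Xblas[i * include + k] = 0.0f;  // zero out the flagged value
            Xco[i * width + count++] = k;   // keep the k's in ascending order
        }
        Xvalid[i * include + k] = !hit;     // flagged entries become invalid
    }
    Xnum[i] = count;                        // number of hits in this row
}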

Upvotes: 0

Views: 387

Answers (1)

harrism

Reputation: 27899

In the original code, you have

for(k = 0; k < include ; k++){
  ...
  int aux = d_Xnum[i];
  ...
  d_Xco[i*width + aux] = k;
  ...
}

The index into the d_Xco array does not depend on k, so writing to it on every iteration is redundant: only the last write survives, and on the last iteration k is include-1. So, replace these two lines inside the k loop with one line outside the loop:

 d_Xco[i*width + d_Xnum[i]] = include - 1;

Once you do that, parallelizing the k loop no longer produces the race condition you currently have, where many k threads assign different values to the same location in d_Xco concurrently (with no guarantee of ordering).
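
In the parallel version, that single write has to happen after every k thread for a given i has finished updating d_Xnum, and a kernel launch boundary gives you that ordering for free. Here is a minimal, untested sketch of the idea; the split into two kernels and the names kernel2_fixed and write_xco are mine:

// Per-(i,k) work with the d_Xco write removed; the atomicAdd is safe
// because the final count does not depend on the order of the adds.
__global__ void kernel2_fixed(int keep, int include, float* d_Xblas,
                              int* d_Xnum, bool* d_Xvalid)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    int k = threadIdx.y + blockIdx.y * blockDim.y;

    if (i < keep && k < include) {
        int val = (d_Xblas[i * include + k] >= 1e5);
        d_Xblas[i * include + k] *= (float)(!val);
        atomicAdd(&d_Xnum[i], val);
        d_Xvalid[i * include + k] = (!val);
    }
}

// One thread per row, launched after kernel2_fixed, so d_Xnum[i]
// already holds its final value when the single d_Xco write happens.
__global__ void write_xco(int keep, int include, int width,
                          int* d_Xco, const int* d_Xnum)
{
    int i = threadIdx.x + blockIdx.x * blockDim.x;
    if (i < keep)
        d_Xco[i * width + d_Xnum[i]] = include - 1;
}

Launched back to back on the same stream, the second kernel cannot start until the first has finished, which reproduces the "outside the loop" ordering of the serial code.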

Upvotes: 1
