Lip

Reputation: 29

How to use WMMA functions?

I have run the example in the CUDA documentation, but I get unexpected results. So how do I use the wmma functions? Is my wmma::load_matrix_sync call wrong? Or is there something else I should pay attention to?...

WMMA_M = WMMA_N = WMMA_K = 16

__global__ void wmma_kernel(half *a, half *b, float *c, int matrix_size)
{
  //Declare the fragments
  wmma::fragment<wmma::matrix_a, WMMA_M, WMMA_N, WMMA_K, half, wmma::col_major> a_frag;
  wmma::fragment<wmma::matrix_b, WMMA_M, WMMA_N, WMMA_K, half, wmma::row_major> b_frag;
  wmma::fragment<wmma::accumulator, WMMA_M, WMMA_N, WMMA_K,float> acc_frag;

  //Load the matrices into the fragments
  wmma::load_matrix_sync(a_frag, a, WMMA_M);
  wmma::load_matrix_sync(b_frag, b, WMMA_K);

  //Initialize the accumulator to zero
  wmma::fill_fragment(acc_frag, 0.0f);

  //Perform the mma repeatedly
  for(int i=0; i<1e4; i++)
  {
    wmma::mma_sync(acc_frag, a_frag, b_frag, acc_frag);
  }

  //store the result
  wmma::store_matrix_sync(c, acc_frag, WMMA_M, wmma::mem_row_major);
}

... I set each element of d_a and d_b to 1.0f, and each c[i] to 0.0f. After running wmma_kernel, c[i] is still 0.0f, and elapsedTime is also 0.0f.

matrix_size = 16 x 16

  //create the timing events
  cudaEvent_t start, stop;
  CUDA_CHECK_RETURN(cudaEventCreate(&start));
  CUDA_CHECK_RETURN(cudaEventCreate(&stop));

  //launch the wmma_kernel
  CUDA_CHECK_RETURN(cudaEventRecord(start));
  wmma_kernel<<<1,256>>>(d_a, d_b, d_c, matrix_size);

  CUDA_CHECK_RETURN(cudaEventRecord(stop));
  CUDA_CHECK_RETURN(cudaEventSynchronize(stop));
  //calculate the elapsed time
  float elapsedTime;
  cudaEventElapsedTime(&elapsedTime, start, stop);

  printf("Elapsed Time : %f\n",elapsedTime);

Upvotes: 2

Views: 3501

Answers (1)

Robert Crovella

Reputation: 151899

One problem is that you cannot directly assign a float value like 1.0f to a half variable in host code (prior to CUDA 10).

I would suggest switching to CUDA 10, which has made the half datatype considerably easier to use on the host.
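For illustration, here is a minimal host-side initialization sketch (the buffer name h_a is just an example); it assumes __float2half() is callable in host code, as it is in recent toolkits, whereas direct assignment from float requires the CUDA 10 host-side __half operators:

#include <cuda_fp16.h>

int main(){
  half *h_a = new half[16*16];
  for (int i = 0; i < 16*16; i++) {
    // explicit float->half conversion intrinsic, usable in host code
    h_a[i] = __float2half(1.0f);
    // direct assignment, e.g. h_a[i] = 1.0f;, only compiles with the
    // CUDA 10 host-side __half operators
  }
  delete [] h_a;
  return 0;
}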

However, the following example should work similarly whether using CUDA 9.2 or CUDA 10:

$ cat t304.cu
#include <mma.h>
#include <iostream>

using namespace nvcuda;

__global__ void wmma_ker(half *a, half *b, float *c) {
   // Declare the fragments
   wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::col_major> a_frag;
   wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag;
   wmma::fragment<wmma::accumulator, 16, 16, 16, float> c_frag;

   // Initialize the output to zero
   wmma::fill_fragment(c_frag, 0.0f);

   // Load the inputs
   wmma::load_matrix_sync(a_frag, a, 16);
   wmma::load_matrix_sync(b_frag, b, 16);

   // Perform the matrix multiplication
   wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);

   // Store the output
   wmma::store_matrix_sync(c, c_frag, 16, wmma::mem_row_major);
}

int main(){

  half *d_a, *h_a, *d_b, *h_b;
  float *d_c, *h_c;
  h_c = new float[16*16];
  h_b = new half[16*16];
  h_a = new half[16*16];
  cudaMalloc(&d_a, 16*16*sizeof(half));
  cudaMalloc(&d_b, 16*16*sizeof(half));
  cudaMalloc(&d_c, 16*16*sizeof(float));
  for (int i = 0; i < 16*16; i++) {
    h_a[i] = __float2half(1.0f);  // explicit host-side float->half conversion
    h_b[i] = __float2half(1.0f);}
  cudaMemcpy(d_a, h_a, 16*16*sizeof(half), cudaMemcpyHostToDevice);
  cudaMemcpy(d_b, h_b, 16*16*sizeof(half), cudaMemcpyHostToDevice);
  wmma_ker<<<1,32>>>(d_a, d_b, d_c);
  cudaMemcpy(h_c, d_c, 16*16*sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < 16*16; i++) std::cout << h_c[i] << ",";
  std::cout << std::endl;
}
$ nvcc -arch=sm_70 -o t304 t304.cu
$ cuda-memcheck ./t304
========= CUDA-MEMCHECK
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
========= ERROR SUMMARY: 0 errors
$

To get a kernel time measurement, you could use cudaEvent-based timing, but it seems easier to me to just use nvprof:

$ nvprof ./t304
==28135== NVPROF is profiling process 28135, command: ./t304
16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,16,
==28135== Profiling application: ./t304
==28135== Profiling result:
            Type  Time(%)      Time     Calls       Avg       Min       Max  Name
 GPU activities:   42.97%  3.2320us         2  1.6160us  1.4080us  1.8240us  [CUDA memcpy HtoD]
                   28.52%  2.1450us         1  2.1450us  2.1450us  2.1450us  [CUDA memcpy DtoH]
                   28.51%  2.1440us         1  2.1440us  2.1440us  2.1440us  wmma_ker(__half*, __half*, float*)
      API calls:   98.42%  498.63ms         3  166.21ms  5.2170us  498.61ms  cudaMalloc
                    1.06%  5.3834ms       384  14.019us     347ns  568.79us  cuDeviceGetAttribute
                    0.38%  1.9473ms         4  486.83us  250.95us  1.1810ms  cuDeviceTotalMem
                    0.10%  493.31us         4  123.33us  109.62us  140.63us  cuDeviceGetName
                    0.01%  68.566us         1  68.566us  68.566us  68.566us  cudaLaunchKernel
                    0.01%  67.104us         3  22.368us  9.6850us  30.563us  cudaMemcpy
                    0.00%  22.628us         4  5.6570us  3.1910us  9.2200us  cuDeviceGetPCIBusId
                    0.00%  8.6020us         8  1.0750us     540ns  1.6570us  cuDeviceGet
                    0.00%  5.8370us         3  1.9450us     443ns  3.7760us  cuDeviceGetCount
                    0.00%  2.7590us         4     689ns     600ns     843ns  cuDeviceGetUuid
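If you prefer cudaEvent-based timing, a minimal sketch that would slot into main() of the example above in place of the plain launch line (reusing the d_a, d_b, d_c pointers, with error checking omitted for brevity) could look like this:

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);
  wmma_ker<<<1,32>>>(d_a, d_b, d_c);
  cudaEventRecord(stop);

  cudaEventSynchronize(stop);   // wait for the kernel and the stop event to complete

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::cout << "kernel time: " << ms << " ms" << std::endl;

  cudaEventDestroy(start);
  cudaEventDestroy(stop);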

Upvotes: 4
