davideAlbertini

Reputation: 93

Incorrect result calling cublasSgemm by a C host code

I get incorrect numbers back from a call to the cuBLAS library function cublasSgemm in C host code. The program compiles and runs, but the numbers in the resulting matrix are wrong.

The difficulty in calling these functions from C host code is that C stores matrices in row-major order, while cuBLAS follows the FORTRAN/BLAS convention and expects matrices in column-major order.
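For example, here is a minimal standalone sketch (the buffer and sizes are made up for illustration) of how the same flat buffer is indexed under the two conventions:

#include <stdio.h>

int main(void) {
    /* A 2x3 matrix written out in C (row-major) order. */
    float buf[6] = { 1, 2, 3,
                     4, 5, 6 };
    int rows = 2, cols = 3;

    /* Row-major (C) indexing: element (i,j) lives at buf[i*cols + j]. */
    printf("row-major (0,1) = %g\n", buf[0 * cols + 1]);  /* prints 2 */

    /* Column-major (BLAS/cuBLAS) indexing of the SAME buffer:
       element (i,j) lives at buf[j*rows + i], so the buffer is
       effectively read as the transpose. */
    printf("col-major (0,1) = %g\n", buf[1 * rows + 0]);  /* prints 3 */

    return 0;
}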

I have tried many combinations of parameters for cublasSgemm, but none of them seems to work properly.

I need to perform the matrix multiplication m1 . m2, so I pass m2 first and then m1: cuBLAS then reads the buffers as (m2)T and (m1)T, where T denotes the transpose, and should compute (m2)T . (m1)T = (m1 . m2)T = (r)T. My C code should finally read (r)T back as r, but I can't get correct numbers... here is the code:

cudaError_t vector_matrix_molt(float *m1, float *m2, float *r, int row1, int col1, int row2, int col2) {

    //Device Memory allocation
    float *d_m1;
    float *d_m2;
    float *d_r;
    float a = 1.0f;
    float b = 0.0f;
    int stride = 1;
    //CUDA stuff
    cublasHandle_t handle;
    cudaError_t cudaStatus;


    cudaStatus = cudaMalloc((void**)&d_m1, col1*row1*sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_m2, row2*col2*sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cudaStatus = cudaMalloc((void**)&d_r, row1*col2*sizeof(float));
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMalloc failed!");
        goto Error;
    }

    cublasCreate(&handle);

    // Copy Data to Device Memory
    cudaStatus = cudaMemcpy(d_m1, m1, row1*col1*sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 1 failed!");
        goto Error;
    }

    cudaStatus = cudaMemcpy(d_m2, m2, row2*col2*sizeof(float), cudaMemcpyHostToDevice);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 2 failed!");
        goto Error;
    }

    /*cublasStatus_t cublasSgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb,
    int m, int n, int k, const float *alpha, const float *A, int lda, const float *B, int ldb, const float *beta, float *C, int ldc)
    */
    //Calling cuBLAS library function... 
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, col2, row1, col1, &a, d_m2, col2, d_m1, col1, &b, d_r, row1);

    // Check for any errors launching the kernel
    cudaStatus = cudaGetLastError();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "moltKernel launch failed: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // cudaDeviceSynchronize waits for the kernel to finish, and returns
    // any errors encountered during the launch.
    cudaStatus = cudaDeviceSynchronize();
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching cublasSgemv!\n", cudaStatus);
        //printf("Cuda Error: %s\n", cudaGetErrorString(cudaStatus));
        goto Error;
    }

    // Copy output vector from GPU buffer to host memory.
    cudaStatus = cudaMemcpy(r, d_r, row1*col2* sizeof(float), cudaMemcpyDeviceToHost);
    if (cudaStatus != cudaSuccess) {
        fprintf(stderr, "cudaMemcpy 3 failed!");
        goto Error;
    }

Error:
    cudaFree(d_m1);
    cudaFree(d_m2);
    cudaFree(d_r);

    return cudaStatus;
}

Upvotes: 1

Views: 731

Answers (1)

kangshiyin

Reputation: 9779

The only thing you need to change is the leading dimension of r: viewed in column-major order, the result cuBLAS writes is (r)T, which has col2 rows, so ldc must be col2 rather than row1.

cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, col2, row1, col1, &a, d_m2, col2, d_m1, col1, &b, d_r, col2);

You could refer to this answer for a more detailed explanation.

Transpose matrix multiplication in cuBLAS howto
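For completeness, here is a minimal sketch of host code (the matrix sizes and values are made up for illustration) that exercises the corrected vector_matrix_molt with a 2x3 m1 and a 3x2 m2; with the fixed ldc the printout should match the ordinary row-major product:

#include <stdio.h>
#include <cuda_runtime.h>
#include <cublas_v2.h>

/* The function from the question, with ldc fixed to col2. */
cudaError_t vector_matrix_molt(float *m1, float *m2, float *r,
                               int row1, int col1, int row2, int col2);

int main(void) {
    /* m1 is 2x3, m2 is 3x2, so r = m1 . m2 is 2x2. */
    float m1[6] = { 1, 2, 3,
                    4, 5, 6 };
    float m2[6] = { 7,  8,
                    9, 10,
                   11, 12 };
    float r[4]  = { 0 };

    if (vector_matrix_molt(m1, m2, r, 2, 3, 3, 2) != cudaSuccess) {
        fprintf(stderr, "vector_matrix_molt failed\n");
        return 1;
    }

    /* Expected row-major result:
       58  64
       139 154 */
    printf("%g %g\n%g %g\n", r[0], r[1], r[2], r[3]);
    return 0;
}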

Upvotes: 1
