Reputation: 31
On Fermi-generation GPUs, single-precision calculations should be about 2 times faster than double-precision ones. However, although I rewrote every 'double' declaration to 'float', I got no speed-up. Is there a mistake somewhere, e.g. in the compile options?
GPU:Tesla C2075 OS:win7 pro Compiler:VS2013(nvcc) CUDA:v.7.5 Command line:nvcc test.cu
I wrote test code:
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>
#include<cuda_runtime.h>
#include<cuda_profiler_api.h>
#include<device_functions.h>
#include<device_launch_parameters.h>
#define DOUBLE 1
#define MAXI 10
// Multiplies every touched element of `a` by 1.0 (an identity operation).
// NOTE(review): because b == 1.0, the compiler is free to optimize the
// multiply away entirely, so this kernel performs no measurable FP64 work —
// unsuitable as a float-vs-double throughput benchmark.
__global__ void Kernel_double(double*a,int nthreadx)
{
double b=1.e0;
int i;
// Flat index built from the .x launch components only; any .y/.z extent of
// the grid/block is ignored, so threads that differ only in .y/.z repeat the
// same writes. There is also no bounds check: the caller must guarantee
// gridDim.x * nthreadx covers at most the allocated length.
i = blockIdx.x * nthreadx + threadIdx.x + 0;
a[i] *= b;
}
// Float variant: multiplies every touched element of `a` by 1.0F.
// NOTE(review): b == 1.0F makes this an identity multiply that the compiler
// can optimize away, so no FP32 arithmetic is actually measured.
__global__ void Kernel_float(float*a,int nthreadx)
{
float b=1.0F;
int i;
// Flat index from the .x launch components only; .y/.z are ignored and there
// is no bounds check against the allocation size.
i = blockIdx.x * nthreadx + threadIdx.x + 0;
a[i] *= b;
}
// Benchmark driver: times 100000 iterations of (kernel launch + blocking
// device-to-host copy) for either the double or the float kernel, selected
// at compile time via the DOUBLE macro.
int main()
{
#if DOUBLE
// Host array of MAXI (10) doubles, all initialized to 1.0.
double a[10];
for(int i=0;i<MAXI;++i){
a[i]=1.e0;
}
// NOTE(review): the return values of cudaMalloc/cudaMemcpy are never
// checked, so any allocation or copy failure goes unnoticed.
double*d_a;
cudaMalloc((void**)&d_a, sizeof(double)*(MAXI));
cudaMemcpy(d_a, a, sizeof(double)*(MAXI), cudaMemcpyHostToDevice);
#else
// Host array of MAXI (10) floats, all initialized to 1.0F.
float a[10];
for(int i=0;i<MAXI;++i){
a[i]=1.0F;
}
float*d_a;
cudaMalloc((void**)&d_a, sizeof(float)*(MAXI));
cudaMemcpy(d_a, a, sizeof(float)*(MAXI), cudaMemcpyHostToDevice);
#endif
// 2x2 grid of 2x2 blocks = 16 threads, but the kernels index with the .x
// components only, so only elements 0..3 of the 10-element array are ever
// written, and threads differing only in .y repeat the same writes.
dim3 grid(2, 2, 1);
dim3 block(2, 2, 1);
clock_t start_clock, end_clock;
double sec_clock;
printf("[%d] start\n", __LINE__);
start_clock = clock();
// NOTE(review): every iteration performs a synchronous cudaMemcpy of a few
// dozen bytes right after the launch; with this little arithmetic, the
// measurement is presumably dominated by launch/transfer overhead, which
// would mask any float-vs-double difference — confirm with a profiler.
for (int i = 1; i <= 100000; ++i){
#if DOUBLE
Kernel_double << < grid, block >> > (d_a, 2);
cudaMemcpy(a, d_a, sizeof(double)*(MAXI), cudaMemcpyDeviceToHost);
#else
Kernel_float << < grid, block >> > (d_a, 2);
cudaMemcpy(a, d_a, sizeof(float)*(MAXI), cudaMemcpyDeviceToHost);
#endif
}
end_clock = clock();
// Elapsed clock() ticks converted to seconds.
sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
printf("[%d] %f[s]\n", __LINE__, sec_clock);
printf("[%d] end\n", __LINE__);
// NOTE(review): d_a is never cudaFree'd (harmless here only because the
// process exits immediately).
return 0;
}
Upvotes: 1
Views: 1104
Reputation: 4860
Well, after some investigation, that's because you just perform a multiplication by the constant 1, which gets optimized to "do nothing" in the binary:
If instead you square the array (to prevent this trivial optimization), you get the following assembly:
and the performance gains are restored with the below (simplified) piece of code, in which I changed a few things:
here is the code:
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<time.h>
#include<conio.h>
#include<cuda_runtime.h>
#include<cuda_profiler_api.h>
#include<device_functions.h>
#include<device_launch_parameters.h>
#define DOUBLE float
#define ITER 10
#define MAXI 100000000
// Squares every element of `a` in place (a[i] = a[i] * a[i]).
// Uses a grid-stride loop, so any 1-D launch configuration covers all MAXI
// elements regardless of the grid size chosen by the host.
__global__ void kernel(DOUBLE*a)
{
    const int stride = blockDim.x * gridDim.x;
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    while (idx < MAXI)
    {
        const DOUBLE v = a[idx];
        a[idx] = v * v;
        idx += stride;
    }
}
// Prints a descriptive message and exits if a CUDA runtime call failed;
// no-op on success.
static void checkCuda(cudaError_t err, const char* what)
{
    if (err != cudaSuccess) {
        fprintf(stderr, "CUDA error (%s): %s\n", what, cudaGetErrorString(err));
        exit(EXIT_FAILURE);
    }
}

// Benchmark driver: squares a MAXI-element array in place ITER times on the
// GPU and reports the elapsed host time for the launches. The element type
// (float or double) is selected at compile time via the DOUBLE macro.
int main()
{
    // Host buffer: MAXI elements initialized to 1.0. At 100M elements this
    // is a large allocation, so check that malloc succeeded.
    DOUBLE* a = (DOUBLE*) malloc(MAXI*sizeof(DOUBLE));
    if (a == NULL) {
        fprintf(stderr, "host allocation of %zu bytes failed\n",
                (size_t)MAXI * sizeof(DOUBLE));
        return EXIT_FAILURE;
    }
    for(int i=0;i<MAXI;++i)
    {
        a[i]=(DOUBLE)1.0;
    }

    // Device buffer and upload, with explicit error checking.
    DOUBLE* d_a;
    checkCuda(cudaMalloc((void**)&d_a, sizeof(DOUBLE)*(MAXI)), "cudaMalloc");
    checkCuda(cudaMemcpy(d_a, a, sizeof(DOUBLE)*(MAXI), cudaMemcpyHostToDevice),
              "host-to-device copy");

    clock_t start_clock, end_clock;
    double sec_clock;
    printf("[%d] start\n", __LINE__);
    start_clock = clock();
    // Launch ITER kernels back to back; launches are asynchronous, so the
    // synchronize below is what makes the timing meaningful.
    for (int i = 1; i <= ITER; ++i){
        kernel <<< 32, 256>>> (d_a);
    }
    // cudaGetLastError catches bad launch configurations; the synchronize
    // surfaces asynchronous execution errors and fences the clock reading.
    checkCuda(cudaGetLastError(), "kernel launch");
    checkCuda(cudaDeviceSynchronize(), "kernel execution");
    end_clock = clock();
    checkCuda(cudaMemcpy(a, d_a, sizeof(DOUBLE)*(MAXI), cudaMemcpyDeviceToHost),
              "device-to-host copy");
    sec_clock = (end_clock - start_clock) / (double)CLOCKS_PER_SEC;
    printf("[%d] %f/%d[s]\n", __LINE__, sec_clock, CLOCKS_PER_SEC);
    printf("[%d] end\n", __LINE__);

    // Release device and host memory (the OS would reclaim both at exit,
    // but freeing keeps the example leak-clean under tooling).
    checkCuda(cudaFree(d_a), "cudaFree");
    free(a);
    return 0;
}
(You'll notice I allocate an array of length 100M to get measurable timings.)
Upvotes: 7