Reputation: 90
I'm trying to pass the atomicAdd function to another function as a template parameter.
Here is my Kernel1:
template<typename T, typename TAtomic>
__global__ void myfunc1(T *address, TAtomic atomicFunc) {
atomicFunc(address, 1);
}
Try 1:
myfunc1<<<1,1>>>(val.dev_ptr, atomicAdd);
It does not work because the compiler cannot match the expected function signature.
Try 2: Firstly, I wrap the atomicAdd into a custom function called MyAtomicAdd.
// Device-side wrapper around the atomicAdd intrinsic, so it has a single
// addressable signature per instantiation (the raw intrinsic is overloaded
// and cannot be passed around directly).
template<typename T>
__device__ void MyAtomicAdd(T *address, T val) {
atomicAdd(address, val);
}
Then, I defined a function-pointer type called "TAtomic" and declared TAtomic as a template parameter.
// Function-pointer type matching MyAtomicAdd<float>'s signature.
typedef void (*TAtomic)(float *,float);
// Kernel taking the atomic op as a NON-TYPE template parameter: the function
// address is resolved at compile time in device code, which is why this
// variant works while passing the pointer as a runtime argument does not.
template<typename T, TAtomic atomicFunc>
__global__ void myfunc2(T *address) {
atomicFunc(address, 1);
}
myfunc2<float, MyAtomicAdd><<<1,1>>>(dev_ptr);
CUDA_CHECK(cudaDeviceSynchronize());
Actually, Try 2 works. But I don't want to use a typedef; I need something more generic.
Try 3: Just passing MyAtomicAdd to myfunc1.
myfunc1<<<1,1>>>(dev_ptr, MyAtomicAdd<float>);
CUDA_CHECK(cudaDeviceSynchronize());
The compiler can compile the code. But when I run the program, an error is reported:
"ERROR in /home/liang/groute-dev/samples/framework/pagerank.cu:70: invalid program counter (76)"
I'm just wondering: why doesn't Try 3 work? And is there a simple or gentle way to implement this requirement? Thank you.
Upvotes: 0
Views: 452
Reputation: 151849
Try 3 doesn't work because you are attempting to take the address of a __device__
function in host code, which is illegal in CUDA:
myfunc1<<<1,1>>>(dev_ptr, MyAtomicAdd<float>);
^
effectively a function pointer - address of a __device__ function
Such usage attempts in CUDA will resolve to some sort of "address" — but it is garbage, so when you try to use it as an actual function entry point in device code, you get the error you encountered: invalid program counter
(or in some cases, just illegal address
).
You can make your Try 3 method work (without a typedef
) by wrapping the intrinsic in a functor instead of a bare __device__
function:
$ cat t48.cu
#include <stdio.h>
// Bare __device__ wrapper kept from the question for comparison; it is only
// referenced by the commented-out "Try 3" launch in main() below and is
// otherwise unused in this listing.
template<typename T>
__device__ void MyAtomicAdd(T *address, T val) {
atomicAdd(address, val);
}
// Functor wrapping atomicAdd. Unlike a __device__ function pointer, a
// functor OBJECT can be constructed in host code and passed by value to a
// kernel; the __device__ operator() is then an ordinary compile-time-resolved
// call in device code — no device address is taken on the host.
template <typename T>
struct myatomicadd
{
__device__ T operator()(T *addr, T val){
return atomicAdd(addr, val);
}
};
// Kernel applying a caller-supplied atomic functor to *address.
// TAtomic is deduced from the launch argument (here myatomicadd<T>).
template<typename T, typename TAtomic>
__global__ void myfunc1(T *address, TAtomic atomicFunc) {
atomicFunc(address, (T)1); // cast so the literal matches T exactly
}
// Demo driver: launch myfunc1 with the myatomicadd<int> functor.
// Improvements over the original listing: CUDA API return codes are checked,
// launch-configuration errors are surfaced via cudaGetLastError(), and the
// device allocation is freed. Normal output is unchanged: "h = 1".
int main(){
    int *dev_ptr = NULL;
    if (cudaMalloc(&dev_ptr, sizeof(int)) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }
    cudaMemset(dev_ptr, 0, sizeof(int)); // byte-wise zero fill is exact for 0
    // Broken "Try 3" from the question, kept for reference: taking the address
    // of a __device__ function in host code yields a garbage pointer.
    // myfunc1<<<1,1>>>(dev_ptr, MyAtomicAdd<int>);
    myfunc1<<<1,1>>>(dev_ptr, myatomicadd<int>());
    cudaError_t err = cudaGetLastError(); // catches launch-config errors
    if (err != cudaSuccess) {
        printf("launch error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptr);
        return 1;
    }
    int h = 0;
    // Blocking copy: implicitly synchronizes with the kernel before reading,
    // and reports any asynchronous execution error from the kernel.
    err = cudaMemcpy(&h, dev_ptr, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("memcpy error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptr);
        return 1;
    }
    printf("h = %d\n", h);
    cudaFree(dev_ptr); // release the device allocation (was leaked before)
    return 0;
}
$ nvcc -arch=sm_35 -o t48 t48.cu
$ cuda-memcheck ./t48
========= CUDA-MEMCHECK
h = 1
========= ERROR SUMMARY: 0 errors
$
We can realize a slightly simpler version of this as well, letting the functor template type be inferred from the kernel template type:
$ cat t48.cu
#include <stdio.h>
// Non-template functor with a TEMPLATED operator(): the element type T is
// deduced per call site from the kernel's own template type, so the host
// code can simply pass myatomicadd() without spelling out <int>/<float>.
struct myatomicadd
{
template <typename T>
__device__ T operator()(T *addr, T val){
return atomicAdd(addr, val);
}
};
// Kernel applying a caller-supplied atomic functor to *address; identical to
// the first listing's kernel, reused with the type-deducing functor above.
template<typename T, typename TAtomic>
__global__ void myfunc1(T *address, TAtomic atomicFunc) {
atomicFunc(address, (T)1); // cast so the literal matches T exactly
}
// Demo driver: launch myfunc1 on an int buffer and a float buffer with the
// same myatomicadd() functor (T deduced per launch).
// Improvements over the original listing: CUDA API return codes are checked,
// launch errors are surfaced via cudaGetLastError(), and both device
// allocations are freed. Normal output is unchanged:
//   h = 1
//   hf = 1.000000
int main(){
    int *dev_ptr = NULL;
    if (cudaMalloc(&dev_ptr, sizeof(int)) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }
    cudaMemset(dev_ptr, 0, sizeof(int));
    myfunc1<<<1,1>>>(dev_ptr, myatomicadd());
    cudaError_t err = cudaGetLastError(); // catches launch-config errors
    if (err != cudaSuccess) {
        printf("launch error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptr);
        return 1;
    }
    int h = 0;
    // Blocking copy: synchronizes with the kernel and surfaces async errors.
    err = cudaMemcpy(&h, dev_ptr, sizeof(int), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("memcpy error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptr);
        return 1;
    }
    printf("h = %d\n", h);
    cudaFree(dev_ptr); // release int buffer (was leaked before)

    float *dev_ptrf = NULL;
    if (cudaMalloc(&dev_ptrf, sizeof(float)) != cudaSuccess) {
        printf("cudaMalloc failed\n");
        return 1;
    }
    cudaMemset(dev_ptrf, 0, sizeof(float)); // all-zero bytes == 0.0f
    myfunc1<<<1,1>>>(dev_ptrf, myatomicadd());
    err = cudaGetLastError();
    if (err != cudaSuccess) {
        printf("launch error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptrf);
        return 1;
    }
    float hf = 0;
    err = cudaMemcpy(&hf, dev_ptrf, sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        printf("memcpy error: %s\n", cudaGetErrorString(err));
        cudaFree(dev_ptrf);
        return 1;
    }
    printf("hf = %f\n", hf);
    cudaFree(dev_ptrf); // release float buffer (was leaked before)
    return 0;
}
$ nvcc -arch=sm_35 -o t48 t48.cu
$ cuda-memcheck ./t48
========= CUDA-MEMCHECK
h = 1
hf = 1.000000
========= ERROR SUMMARY: 0 errors
$
More treatments of the use of device function pointers in CUDA are linked to this answer.
Upvotes: 1