Alex

Reputation: 23

Is it possible to use CUDA to parallelize this nested for loop?

I want to speed up this nested for loop. I have just started learning CUDA; how could I use CUDA to parallelize this C++ code?

#include <cstdlib>
#include <cmath>
#include <algorithm>

#define PI 3.14159265
using namespace std;
int main()
{
    int nbint = 2;
    int hits = 20;
    int nbinp = 2;
    float _theta, _phi, _l, _m, _n, _k = 0, delta = 5;
    float x[20],y[20],z[20],a[20],t[20];
    for (int i = 0; i < hits; ++i)
    {
        x[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        y[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        z[i] = rand() / (float)(RAND_MAX / 100);
    }
    for (int i = 0; i < hits; ++i)
    {
        a[i] = rand() / (float)(RAND_MAX / 100);
    }
    float maxforall = 1e-6;
    float theta0;
    float phi0;
    for (int i = 0; i < nbint; i++)
    {
        _theta = (0.5 + i)*delta;
        for (int j = 0; j < nbinp; j++)
        {
            _phi = (0.5 + j)*delta / _theta;
            _l = sin(_theta* PI / 180.0)*cos(_phi* PI / 180.0);
            _m = sin(_theta* PI / 180.0)*sin(_phi* PI / 180.0);
            _n = cos(_theta* PI / 180.0);
            for (int k = 0; k < hits; k++)
            {
                _k = -(_l*x[k] + _m*y[k] + _n*z[k]);
                t[k] = a[k] - _k;   
            }

            std::sort(t, t + hits);   // sort the times in ascending order
            float max = t[0];
            for (int k = 0; k < hits; k++)
            {
                if (max < t[k])
                    max = t[k];
            }
            if (max > maxforall)
            {
                maxforall = max;
            }

        }
    }
    return 0;
}

I want to parallelize the innermost for loop and the sort part (maybe the whole nested loop). After sorting the arrays I find the maximum over all of them. I used the maximum only to simplify the code. The reason I need the sort is that the maximum here stands for continuous time information (all the arrays contain time information). The sort orders those times from lowest to highest. Then I compare a specific time interval (not a single value). The comparison process is almost like choosing the maximum, but over a continuous interval rather than a single value.
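
For reference, here is a minimal CPU-side sketch of what I mean by comparing an interval rather than a single value. The window width dt and the "count hits inside the window" criterion are only placeholders for my real filter condition, not the actual rule.

#include <algorithm>
#include <vector>

// Hypothetical illustration: after sorting the times, slide a window of
// width dt over them and score each window (here: number of hits inside).
// dt and the scoring rule are placeholders for the real filter condition.
int best_window_score(std::vector<float> t, float dt)
{
    std::sort(t.begin(), t.end());
    int best = 0;
    std::size_t lo = 0;
    for (std::size_t hi = 0; hi < t.size(); ++hi)
    {
        while (t[hi] - t[lo] > dt)
            ++lo;
        best = std::max(best, int(hi - lo + 1));
    }
    return best;
}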

Upvotes: 1

Views: 1120

Answers (1)

m.s.

Reputation: 16334

Your 3 nested loops calculate nbint*nbinp*hits values. Since each of those values is independent of the others, all of them can be calculated in parallel.

You stated in your comments that you have a commutative and associative "filter condition" which reduces the output to a single scalar value. This can be exploited to avoid sorting and storing the temporary values. Instead, we can calculate the values on-the-fly and then apply a parallel reduction to determine the end result.
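
To illustrate the point with a purely sequential sketch (not part of the solution below): because the filter condition is commutative and associative, every value can be folded into a running result the moment it is produced, so the temporary array and the sort disappear.

// Sequential sketch: instead of "store all t[k], sort, then scan",
// fold each value into a running result on the fly. Any commutative,
// associative binary operation (here: max) works as the fold step.
float fold_on_the_fly(const float* values, int n)
{
    float result = -1e30f;                    // start value below all inputs
    for (int k = 0; k < n; ++k)
        result = result > values[k] ? result : values[k];   // fold step
    return result;
}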

This could also be done in "raw" CUDA (a sketch follows below); after that I implement the idea using thrust. The main idea is to run grid_op nbint*nbinp*hits times in parallel. To recover the three original "loop indices" from the single scalar index passed to grid_op, the algorithm from this SO question is used.
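
For comparison, here is a minimal sketch of the "raw" CUDA variant, assuming one thread per (i, j, k) triple and a simple atomic-max reduction. The kernel name, the atomicMaxFloat helper and the launch configuration are my own illustration, not part of the thrust solution below.

#include <cmath>
#include <cstdlib>
#include <iostream>
#include <vector>

// CAS-loop atomic max for floats (works for negative values too)
__device__ float atomicMaxFloat(float* addr, float value)
{
    int* addr_as_int = (int*)addr;
    int old = *addr_as_int, assumed;
    do {
        assumed = old;
        old = atomicCAS(addr_as_int, assumed,
                        __float_as_int(fmaxf(value, __int_as_float(assumed))));
    } while (assumed != old);
    return __int_as_float(old);
}

// one thread per (i, j, k); the flat index is unflattened with the same
// scheme as grid_op below
__global__ void grid_kernel(const float* x, const float* y, const float* z,
                            const float* a, int nbint, int nbinp, int hits,
                            float* result)
{
    const int idx = blockIdx.x * blockDim.x + threadIdx.x;
    const int total = nbint * nbinp * hits;
    if (idx >= total) return;

    const int i = idx % nbint;
    const int j = (idx / nbint) % nbinp;
    const int k = idx / (nbint * nbinp);

    const float pi = 3.14159265f;
    const float delta = 5.0f;
    const float theta = (0.5f + i) * delta;
    const float phi   = (0.5f + j) * delta / theta;
    const float l = sinf(theta * pi / 180.0f) * cosf(phi * pi / 180.0f);
    const float m = sinf(theta * pi / 180.0f) * sinf(phi * pi / 180.0f);
    const float n = cosf(theta * pi / 180.0f);

    // a[k] - (-(l*x + m*y + n*z))  ==  a[k] + l*x + m*y + n*z
    const float val = a[k] + l * x[k] + m * y[k] + n * z[k];

    atomicMaxFloat(result, val);   // max as a stand-in for the real filter
}

int main()
{
    const int nbint = 3, nbinp = 4, hits = 20;
    const int total = nbint * nbinp * hits;

    std::vector<float> h_x(hits), h_y(hits), h_z(hits), h_a(hits);
    for (int i = 0; i < hits; ++i) {
        h_x[i] = rand() / (float)RAND_MAX;
        h_y[i] = rand() / (float)RAND_MAX;
        h_z[i] = rand() / (float)RAND_MAX;
        h_a[i] = rand() / (float)RAND_MAX;
    }

    float *d_x, *d_y, *d_z, *d_a, *d_result;
    cudaMalloc(&d_x, hits * sizeof(float));
    cudaMalloc(&d_y, hits * sizeof(float));
    cudaMalloc(&d_z, hits * sizeof(float));
    cudaMalloc(&d_a, hits * sizeof(float));
    cudaMalloc(&d_result, sizeof(float));
    cudaMemcpy(d_x, h_x.data(), hits * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_y, h_y.data(), hits * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_z, h_z.data(), hits * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_a, h_a.data(), hits * sizeof(float), cudaMemcpyHostToDevice);
    const float init = -1e30f;
    cudaMemcpy(d_result, &init, sizeof(float), cudaMemcpyHostToDevice);

    grid_kernel<<<(total + 255) / 256, 256>>>(d_x, d_y, d_z, d_a,
                                              nbint, nbinp, hits, d_result);

    float max;
    cudaMemcpy(&max, d_result, sizeof(float), cudaMemcpyDeviceToHost);
    std::cout << "max = " << max << std::endl;

    cudaFree(d_x); cudaFree(d_y); cudaFree(d_z);
    cudaFree(d_a); cudaFree(d_result);
}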

thrust::transform_reduce performs the on-the-fly transformation and the subsequent parallel reduction (here thrust::maximum is used as a substitute for your actual filter condition).

#include <cmath>

#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/functional.h>
#include <thrust/transform_reduce.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/tuple.h>

// ### BEGIN utility for demo ####
#include <iostream>
#include <thrust/random.h>

thrust::host_vector<float> random_vector(const size_t N)
{
    thrust::default_random_engine rng;
    thrust::uniform_real_distribution<float> u01(0.0f, 1.0f);
    thrust::host_vector<float> temp(N);
    for(size_t i = 0; i < N; i++) {
        temp[i] = u01(rng);
    }
    return temp;
}
// ### END utility for demo ####

template <typename... Iterators>
thrust::zip_iterator<thrust::tuple<Iterators...>> zip(Iterators... its)
{
    return thrust::make_zip_iterator(thrust::make_tuple(its...));
}

template <typename ZipIterator>
class grid_op
{
public:
    grid_op(ZipIterator zipIt, std::size_t dim1, std::size_t dim2) : zipIt(zipIt), dim1(dim1), dim2(dim2){}

    __host__ __device__
    float operator()(std::size_t index) const
    {
        const auto coords = unflatten_3d_index(index, dim1, dim2);
        const auto values = zipIt[thrust::get<2>(coords)]; 
        const float delta = 5;
        const float _theta = (0.5f + thrust::get<0>(coords))*delta;
        const float _phi = (0.5f + thrust::get<1>(coords))*delta / _theta;
        const float _l = sin(_theta* M_PI / 180.0)*cos(_phi* M_PI / 180.0);
        const float _m = sin(_theta* M_PI / 180.0)*sin(_phi* M_PI / 180.0);
        const float _n = cos(_theta* M_PI / 180.0);
        const float _k = -(_l*thrust::get<0>(values) + _m*thrust::get<1>(values) + _n*thrust::get<2>(values));
        return (thrust::get<3>(values) - _k);   
    }

private:
    __host__ __device__
    thrust::tuple<std::size_t, std::size_t, std::size_t>
    unflatten_3d_index(std::size_t index, std::size_t dim1, std::size_t dim2) const
    {
        // taken from https://stackoverflow.com/questions/29142417/4d-position-from-1d-index
        std::size_t x = index % dim1;
        std::size_t y = ( ( index - x ) / dim1 ) %  dim2;
        std::size_t z = ( ( index - y * dim1 - x ) / (dim1 * dim2) );
        return thrust::make_tuple(x,y,z);
    }

    ZipIterator zipIt;
    std::size_t dim1;
    std::size_t dim2;
};

template <typename ZipIterator>
grid_op<ZipIterator> make_grid_op(ZipIterator zipIt, std::size_t dim1, std::size_t dim2)
{
    return grid_op<ZipIterator>(zipIt, dim1, dim2);
}

int main()
{
    const int nbint = 3;
    const int nbinp = 4;
    const int hits = 20;
    const std::size_t N = nbint * nbinp * hits;

    thrust::device_vector<float> d_x = random_vector(hits);
    thrust::device_vector<float> d_y = random_vector(hits);
    thrust::device_vector<float> d_z = random_vector(hits);
    thrust::device_vector<float> d_a = random_vector(hits);

    auto zipIt = zip(d_x.begin(), d_y.begin(), d_z.begin(), d_a.begin());
    auto countingIt = thrust::counting_iterator<std::size_t>(0);
    auto unary_op = make_grid_op(zipIt, nbint, nbinp);
    auto binary_op = thrust::maximum<float>();
    const float init = 0;

    float max = thrust::transform_reduce(
        countingIt, countingIt+N,
        unary_op,
        init,
        binary_op
    );

    std::cout << "max = " << max << std::endl;
}
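
Since thrust::maximum is only a stand-in, the reduction step can be swapped for your real filter condition by passing a custom commutative, associative binary functor. The sketch below is hypothetical ("better_of" is a placeholder name, and its body is not implied by the code above); it only shows where such a functor would plug into the existing main().

// Hypothetical sketch: any commutative, associative binary functor can
// replace thrust::maximum in the reduction. "better_of" is a placeholder;
// its body must encode your real filter condition.
struct better_of
{
    __host__ __device__
    float operator()(float lhs, float rhs) const
    {
        // placeholder criterion: keep the larger value
        return lhs > rhs ? lhs : rhs;
    }
};

Inside main(), the reduction call then becomes:

    float result = thrust::transform_reduce(
        countingIt, countingIt+N,
        unary_op,
        init,
        better_of()
    );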

Upvotes: 1
