Photos

Reputation: 41

How to Use CUDA Graphs with Interdependent Streams and Dynamic Parameters?

I have a CUDA program with multiple interdependent streams, and I want to convert it to use CUDA graphs to reduce launch overhead and improve performance. My program involves launching three kernels (kernel1, kernel2, and kernel3) across two streams (stream1 and stream2), with dependencies managed using CUDA events (event1 and event2). The parameters for these kernels are dynamic and need to be updated at each iteration.

In particular, the dynamic parameters include:

Here is the simplified structure of my code:

cudaStream_t stream1, stream2;
cudaEvent_t event1, event2;

for (int i = 0; i < 1024; i++) {
    if (i == 0) {
        // First iteration: no cross-stream dependencies yet.
        kernel1<<<1, 512, 0, stream1>>>(i, dynamic_parameters);
        kernel2<<<1, 512, 0, stream2>>>(i, dynamic_parameters);
        kernel3<<<1, 512, 0, stream1>>>(i, dynamic_parameters);
        cudaEventRecord(event1, stream2);  // event1: kernel2 has finished
        cudaEventRecord(event2, stream1);  // event2: kernel3 has finished
    } else {
        cudaStreamWaitEvent(stream1, event1, 0);  // kernel1 waits for the previous kernel2
        kernel1<<<1, 512, 0, stream1>>>(i, dynamic_parameters);
        cudaStreamWaitEvent(stream2, event2, 0);  // kernel2 waits for the previous kernel3
        kernel2<<<1, 512, 0, stream2>>>(i, dynamic_parameters);
        cudaEventRecord(event1, stream2);
        kernel3<<<1, 512, 0, stream1>>>(i, dynamic_parameters);
        cudaEventRecord(event2, stream1);
    }
}

I've read the NVIDIA blog posts Employing CUDA Graphs in a Dynamic Environment and Constructing CUDA Graphs with Dynamic Parameters, but I couldn't figure out how to apply them to my case.

Specifically, I am struggling with capturing the cross-stream event dependencies inside the graph, and with updating the dynamic kernel parameters at each iteration.

Here's an outline of what I've tried so far to convert the else part to use CUDA graphs:

#include <cuda_runtime.h>
#include <vector>

// Assume these kernel functions are defined 
__global__ void kernel1(int i, int* static_params, int* dynamic_params);
__global__ void kernel2(int i, int* static_params, int* dynamic_params);
__global__ void kernel3(int i, int* dynamic_params);

void execute_kernels_with_graphs(int* matrix, int* array, int m, int n, int* static_params, int* dynamic_params) {
    cudaStream_t stream1, stream2;
    cudaEvent_t event1, event2;
    cudaGraph_t capturedGraph;
    cudaGraphExec_t graphExec;
    std::vector<cudaGraphNode_t> node_list;

    cudaStreamCreate(&stream1);
    cudaStreamCreate(&stream2);
    cudaEventCreate(&event1);
    cudaEventCreate(&event2);

    bool capturingGraph = true;
    bool updatingGraph = false;

    for (int i = 0; i < 1024; i++) {
        if (i == 0) {
            kernel1<<<1, 512, 0, stream1>>>(i, static_params, dynamic_params);
            kernel2<<<1, 512, 0, stream2>>>(i, static_params, dynamic_params);
            kernel3<<<1, 512, 0, stream1>>>(i, dynamic_params);
            cudaEventRecord(event1, stream2);
            cudaEventRecord(event2, stream1);
        } else {
            if (capturingGraph) {
                // Start capturing the graph
                cudaStreamBeginCapture(stream1, cudaStreamCaptureModeGlobal);

                // Wait for event and launch kernels
                cudaStreamWaitEvent(stream1, event1, 0);
                kernel1<<<1, 512, 0, stream1>>>(i, static_params, dynamic_params);
                cudaStreamWaitEvent(stream2, event2, 0);
                kernel2<<<1, 512, 0, stream2>>>(i, static_params, dynamic_params);
                cudaEventRecord(event1, stream2);
                kernel3<<<1, 512, 0, stream1>>>(i, dynamic_params);
                cudaEventRecord(event2, stream1);

                // Get the current stream capturing graph
                cudaStreamCaptureStatus capture_status;
                const cudaGraphNode_t* deps;
                size_t dep_count;
                cudaStreamGetCaptureInfo_v2(stream1, &capture_status, nullptr, &capturedGraph, &deps, &dep_count);

                // Manually add kernel nodes with dynamic parameters
                cudaGraphNode_t new_node;
                cudaKernelNodeParams dynamic_params_cuda = {};
                dynamic_params_cuda.func = (void*)kernel1;
                dynamic_params_cuda.gridDim = dim3(1);
                dynamic_params_cuda.blockDim = dim3(512);
                dynamic_params_cuda.sharedMemBytes = 0;
                dynamic_params_cuda.kernelParams = (void**)&dynamic_params;

                cudaGraphAddKernelNode(&new_node, capturedGraph, deps, dep_count, &dynamic_params_cuda);
                node_list.push_back(new_node);

                dynamic_params_cuda.func = (void*)kernel2;
                dynamic_params_cuda.gridDim = dim3(1);
                dynamic_params_cuda.kernelParams = (void**)&dynamic_params;
                cudaGraphAddKernelNode(&new_node, capturedGraph, deps, dep_count, &dynamic_params_cuda);
                node_list.push_back(new_node);

                dynamic_params_cuda.func = (void*)kernel3;
                dynamic_params_cuda.gridDim = dim3(1);
                dynamic_params_cuda.kernelParams = (void**)&dynamic_params;
                cudaGraphAddKernelNode(&new_node, capturedGraph, deps, dep_count, &dynamic_params_cuda);
                node_list.push_back(new_node);

                // End the capture and instantiate the graph
                cudaStreamEndCapture(stream1, &capturedGraph);
                cudaGraphInstantiate(&graphExec, capturedGraph, nullptr, nullptr, 0);

                capturingGraph = false;
                updatingGraph = true;
            } else if (updatingGraph) {
                // Update the dynamic parameters of the kernel nodes in the graph
                cudaKernelNodeParams dynamic_params_updated_cuda = {};
                dynamic_params_updated_cuda.func = (void*)kernel1;
                dynamic_params_updated_cuda.gridDim = dim3(1);
                dynamic_params_updated_cuda.blockDim = dim3(512);
                dynamic_params_updated_cuda.sharedMemBytes = 0;
                dynamic_params_updated_cuda.kernelParams = (void**)&dynamic_params;

                cudaGraphExecKernelNodeSetParams(graphExec, node_list[0], &dynamic_params_updated_cuda);

                dynamic_params_updated_cuda.func = (void*)kernel2;
                dynamic_params_updated_cuda.gridDim = dim3(1);
                dynamic_params_updated_cuda.kernelParams = (void**)&dynamic_params;
                cudaGraphExecKernelNodeSetParams(graphExec, node_list[1], &dynamic_params_updated_cuda);

                dynamic_params_updated_cuda.func = (void*)kernel3;
                dynamic_params_updated_cuda.gridDim = dim3(1);
                dynamic_params_updated_cuda.kernelParams = (void**)&dynamic_params;
                cudaGraphExecKernelNodeSetParams(graphExec, node_list[2], &dynamic_params_updated_cuda);
            }

            // Execute the graph
            cudaGraphLaunch(graphExec, stream1);
        }
    }

    cudaStreamSynchronize(stream1);
    cudaStreamSynchronize(stream2);

    cudaStreamDestroy(stream1);
    cudaStreamDestroy(stream2);
    cudaEventDestroy(event1);
    cudaEventDestroy(event2);
    cudaGraphDestroy(capturedGraph);
    cudaGraphExecDestroy(graphExec);
}

Despite following these steps, I'm unsure if I'm capturing the interdependent streams and dynamic parameters correctly. Can someone guide me on how to achieve this effectively using CUDA graphs?

Upvotes: 1

Views: 428

Answers (2)

einpoklum

Reputation: 131385

I am ignoring the specific code you posted and trying to address the main challenge:

The parameters for these kernels are dynamic and need to be updated at each iteration.

You can use the mechanism for setting parameters in CUDA graph instances, and specifically, this function:

__host__ cudaError_t cudaGraphExecKernelNodeSetParams (
    cudaGraphExec_t hGraphExec, 
    cudaGraphNode_t node, 
    const cudaKernelNodeParams* pNodeParams 
);

So, no more capturing for you... you construct the graph vertices and edges "manually" for the first loop iteration (i = 0), using the graph API: add the three kernel-launch nodes with the cudaGraphAddKernelNode() function, and save their cudaGraphNode_t handles as well as the cudaKernelNodeParams structures you passed for them. Note that in those structures, kernel arguments are set using an array of void-pointers.

You then instantiate the graph, and start your loop over i.

At each loop iteration, you construct an array of void pointers, pointing at the i'th iteration's arguments for each of your kernels; you set the cudaKernelNodeParams::kernelParams field to that array, and call cudaGraphExecKernelNodeSetParams() - to commit the update to CUDA's graph instance. You then launch the instance like in your existing code (but once in each iteration).
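Here is a minimal sketch of that flow, reusing the kernel names, launch configuration and dynamic_params pointer from the question's simplified code; the two-argument kernel signatures, the helper name run_with_manual_graph, and the single kernel1 -> kernel3 edge are assumptions for illustration, not a drop-in implementation:

#include <cuda_runtime.h>

// Assume these kernels are defined, as in the question.
__global__ void kernel1(int i, int* dynamic_params);
__global__ void kernel2(int i, int* dynamic_params);
__global__ void kernel3(int i, int* dynamic_params);

void run_with_manual_graph(int* dynamic_params, cudaStream_t stream)
{
    cudaGraph_t graph;
    cudaGraphExec_t graphExec;
    cudaGraphNode_t n1, n2, n3;

    cudaGraphCreate(&graph, 0);

    // Kernel arguments are passed as an array of pointers to each argument value.
    int iter = 0;
    void* args[] = { &iter, &dynamic_params };

    cudaKernelNodeParams p = {};
    p.gridDim = dim3(1);
    p.blockDim = dim3(512);
    p.sharedMemBytes = 0;
    p.kernelParams = args;
    p.extra = nullptr;

    // kernel1 and kernel2 are root nodes (they may run concurrently);
    // kernel3 depends on kernel1, mirroring the stream1 ordering in the question.
    p.func = (void*)kernel1;
    cudaGraphAddKernelNode(&n1, graph, nullptr, 0, &p);
    p.func = (void*)kernel2;
    cudaGraphAddKernelNode(&n2, graph, nullptr, 0, &p);
    p.func = (void*)kernel3;
    cudaGraphAddKernelNode(&n3, graph, &n1, 1, &p);

    cudaGraphInstantiate(&graphExec, graph, nullptr, nullptr, 0);

    for (int i = 0; i < 1024; i++) {
        // Point the argument array at this iteration's values and commit the
        // update into the executable graph before launching it.
        iter = i;
        p.func = (void*)kernel1;
        cudaGraphExecKernelNodeSetParams(graphExec, n1, &p);
        p.func = (void*)kernel2;
        cudaGraphExecKernelNodeSetParams(graphExec, n2, &p);
        p.func = (void*)kernel3;
        cudaGraphExecKernelNodeSetParams(graphExec, n3, &p);

        // Consecutive launches into the same stream serialize, which also covers
        // the cross-iteration event dependencies of the original code.
        cudaGraphLaunch(graphExec, stream);
    }
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(graphExec);
    cudaGraphDestroy(graph);
}

Note that the argument values are copied when the node parameters are set, not when the graph is launched; that is why the cudaGraphExecKernelNodeSetParams() call has to happen in every iteration, and why the same host-side iter variable can be reused.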

Upvotes: 1

einpoklum

Reputation: 131385

I am ignoring the specific code you posted and trying to address the main challenge:

The parameters for these kernels are dynamic and need to be updated at each iteration.

One possible approach is to replace "call dynamism" with data dynamism. Specifically:

  • Suppose your dynamic parameters are of types param1_t and param2_t.
  • Define struct dynamic_params_t { param1_t param1; param2_t param2; };
  • Allocate sizeof(dynamic_params_t) as managed memory (before creating/capturing the CUDA graph)
  • Have the kernels take a dynamic_params_t const * __restrict parameter - the address of the piece of memory you allocated.
  • Read the actual parameters via the pointer.
  • Use cudaStreamAddCallback to schedule the CPU-side code which updates the call arguments.
  • Use events to make sure the argument-setting code is executed after the previous call and before the next.

(The new events and callback should be captured in the graph; a sketch of this approach follows below.)


If you're not familiar with managed memory, read Unified Memory for CUDA Beginners, and then use cudaMallocManaged() (or, with the driver API, cuMemAllocManaged()) for the allocation.
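Below is a minimal sketch of this approach, assuming a single stream and a toy dynamic_params_t with two fields; the names update_params, update_state_t and run_with_data_dynamism are made up for illustration. Since a host callback is recorded into a captured graph as a host node via cudaLaunchHostFunc, the sketch uses that call in place of cudaStreamAddCallback; with a single capture stream, the ordering the extra events would enforce is already implied by stream order, so they are omitted. It also assumes a system with concurrent managed-memory access (e.g. Linux on Pascal or newer), so the host node can write the managed struct while the graph is executing:

#include <cuda_runtime.h>

struct dynamic_params_t { int param1; float param2; };

// The kernel reads the current arguments through the managed pointer.
__global__ void kernel1(dynamic_params_t const* __restrict__ params)
{
    int p1 = params->param1;
    float p2 = params->param2;
    // ... use p1 and p2 ...
    (void)p1; (void)p2;
}

// Host-side state that drives the per-iteration update.
struct update_state_t { dynamic_params_t* params; int iteration; };

void CUDART_CB update_params(void* userData)
{
    update_state_t* s = static_cast<update_state_t*>(userData);
    s->params->param1 = s->iteration;          // this iteration's arguments
    s->params->param2 = 0.5f * s->iteration;
    s->iteration++;
}

void run_with_data_dynamism()
{
    dynamic_params_t* params;
    cudaMallocManaged(&params, sizeof(dynamic_params_t));  // before capturing

    update_state_t state{params, 0};

    cudaStream_t stream;
    cudaStreamCreate(&stream);

    // Capture one iteration: host node (argument update) -> kernel.
    cudaGraph_t graph;
    cudaGraphExec_t graphExec;
    cudaStreamBeginCapture(stream, cudaStreamCaptureModeGlobal);
    cudaLaunchHostFunc(stream, update_params, &state);     // captured as a host node
    kernel1<<<1, 512, 0, stream>>>(params);
    cudaStreamEndCapture(stream, &graph);
    cudaGraphInstantiate(&graphExec, graph, nullptr, nullptr, 0);

    // The graph itself never changes; only the data behind `params` does.
    for (int i = 0; i < 1024; i++)
        cudaGraphLaunch(graphExec, stream);
    cudaStreamSynchronize(stream);

    cudaGraphExecDestroy(graphExec);
    cudaGraphDestroy(graph);
    cudaStreamDestroy(stream);
    cudaFree(params);
}

The point of this design is that the graph topology and the kernel's pointer argument stay fixed across all launches; only the contents of the managed struct change, so no cudaGraphExec* update calls are needed at all.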

Upvotes: 1
