go_krm

Reputation: 1

MirroredStrategy output varies depending on the visible GPUs

I'm trying to run the following code

import tensorflow as tf


def test_multi_gpu_communication():
    # Check if multiple GPUs are available
    gpus = tf.config.list_physical_devices('GPU')
    if len(gpus) < 2:
        print("\033[91mNot enough GPUs available for testing.\033[0m")
        return

    # Create a mirrored strategy to distribute computations across GPUs
    strategy = tf.distribute.MirroredStrategy()
    # strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.HierarchicalCopyAllReduce())
    # strategy = tf.distribute.MirroredStrategy(cross_device_ops=tf.distribute.ReductionToOneDevice())
    added_value = 3
    # Define a simple computation that uses all available GPUs
    with strategy.scope():
        @tf.function
        def process_variable(x):
            return x + added_value
        
        @tf.function
        def all_reduce_sum(x):
            strategy.run(tf.print, args=("x: ", x,))
            per_replica_result = strategy.run(process_variable, args=(x,))
            strategy.run(tf.print, args=("per_replica_result: ", per_replica_result,))
            return strategy.reduce(tf.distribute.ReduceOp.SUM, per_replica_result, axis=None)

        # Create a tensor on each GPU
        x = tf.Variable([1, 2, 3], dtype=tf.float32)
        replicas = strategy.num_replicas_in_sync

        # Perform the all-reduce operation
        tf.print(x)
        print(x)
        result = all_reduce_sum(x)

        # Check if the result is as expected
        expected_result = tf.constant([replicas * (1 + added_value), replicas * (2 + added_value), replicas * (3 + added_value)], dtype=tf.float32)
        if not tf.reduce_all(tf.equal(result, expected_result)):
            tf.print("Variable: ", x)
            print("detailed variabled: ", x)
            tf.print("result: ", result)
            tf.print("expected_result: ", expected_result)
            raise ValueError("\033[91mMismatch between result and expected one\033[0m")

    print(f"\033[32mMulti-GPU communication test passed successfully with {replicas} replicas.\033[0m")


if __name__ == "__main__":
    test_multi_gpu_communication()

However, the assertion fails in some cases depending on how I set CUDA_VISIBLE_DEVICES. For example, setting it to 0,1,2,3,4,5,6,7 works fine, but setting it to 1,2,3,4,5,6,7,0 fails: some of the mirrored variables shown in the print output are [0,0,0] instead of [1,2,3].
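
For reference, this is roughly how I select the device order (a minimal sketch; the environment variable has to be set before TensorFlow is imported, and the ordering shown is just one of the failing cases):

import os

# Must be set before importing TensorFlow; this ordering fails for me,
# while "0,1,2,3,4,5,6,7" passes.
os.environ["CUDA_VISIBLE_DEVICES"] = "1,2,3,4,5,6,7,0"

import tensorflow as tf

print(tf.config.list_physical_devices('GPU'))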

I checked nvidia-smi topo -m, and the test seems to work fine when I choose GPUs that are connected through SYS, but it sometimes fails when PIX appears among the connections (probably related to the first GPU in the CUDA_VISIBLE_DEVICES list having PIX in one of its connections):

        GPU0    GPU1    GPU2    GPU3    GPU4    GPU5    GPU6    GPU7    NIC0    NIC1    CPU Affinity    NUMA Affinity    GPU NUMA ID
GPU0     X      SYS     SYS     SYS     SYS     SYS     SYS     SYS     SYS     SYS     0-23,48-71   N/A
GPU1    SYS      X      PIX     PIX     SYS     SYS     SYS     SYS     SYS     SYS     0-23,48-71   N/A
GPU2    SYS     PIX      X      PIX     SYS     SYS     SYS     SYS     SYS     SYS     0-23,48-71   N/A
GPU3    SYS     PIX     PIX      X      SYS     SYS     SYS     SYS     SYS     SYS     0-23,48-71   N/A
GPU4    SYS     SYS     SYS     SYS      X      PIX     SYS     SYS     SYS     SYS     24-47,72-95  N/A
GPU5    SYS     SYS     SYS     SYS     PIX      X      SYS     SYS     SYS     SYS     24-47,72-95  N/A
GPU6    SYS     SYS     SYS     SYS     SYS     SYS      X      PIX     SYS     SYS     24-47,72-95  N/A
GPU7    SYS     SYS     SYS     SYS     SYS     SYS     PIX      X      SYS     SYS     24-47,72-95  N/A
NIC0    SYS     SYS     SYS     SYS     SYS     SYS     SYS     SYS      X      PIX
NIC1    SYS     SYS     SYS     SYS     SYS     SYS     SYS     SYS     PIX      X
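
I normally select GPUs via CUDA_VISIBLE_DEVICES; as a sanity check, restricting the run to a pair of GPUs that only reach each other through SYS seems to pass. Below is a minimal in-process sketch of that kind of selection (the indices are just an assumption based on the matrix above, e.g. GPU0 and GPU4):

import tensorflow as tf

# Illustrative only: keep two GPUs whose link type in the matrix above is SYS.
gpus = tf.config.list_physical_devices('GPU')
tf.config.set_visible_devices([gpus[0], gpus[4]], 'GPU')

strategy = tf.distribute.MirroredStrategy()
print("replicas:", strategy.num_replicas_in_sync)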

I tested with TensorFlow 2.16.1 and 2.6, and both show the same issue. Does anyone know a solution or a workaround for this problem?

I tried different cross_device_ops values for the strategy and also reordering the devices passed to MirroredStrategy, but it still fails.
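
Concretely, the variations I mean look roughly like this (the device order and cross_device_ops shown are just examples of what I tried; none of them changed the outcome):

import tensorflow as tf

# Sketch of the variations: explicit device order plus a different
# cross-device reduction implementation.
strategy = tf.distribute.MirroredStrategy(
    devices=["/gpu:1", "/gpu:0", "/gpu:2", "/gpu:3"],
    cross_device_ops=tf.distribute.ReductionToOneDevice(),
)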

Upvotes: 0

Views: 12

Answers (0)
