user1497119
user1497119

Reputation: 473

unexpected deadlock in MPI

I hope someone can help me. My code:

void process(int myid, int numprocs)
{
    int i,j, anzahl, rest;
    MPI_Status stat;

    meta = (int *)malloc(3 * sizeof(int));
    if(myid == 0)
    {
        meta[0] = ASpalten;
        meta[1] = AZeilen;
        meta[2] = BSpalten;

        for (i = 0; i < numprocs; i++) //masternode distributes matrix A to every single core
        {     
            MPI_Send(&meta[0], 3, MPI_INT, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug04\n", myid);
            MPI_Send(&MA[0], ASpalten*AZeilen, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug05\n", myid);
            MPI_Send(&MB[0], ASpalten*BSpalten, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug06\n", myid);
        }
   }
   else
   {
       MPI_Recv(meta, 3, MPI_INT, 0, TAG, MPI_COMM_WORLD, &stat);
       printf("%d: debug01\n", myid);
       ASpalten = meta[0];
       AZeilen = meta[1];
       BSpalten=meta[2];
       printf("%d: debug02\n", myid);
       MA = (double*)malloc(ASpalten*AZeilen*sizeof(double));
       MB = (double*)malloc(ASpalten*BSpalten*sizeof(double));
       MR = (double*)malloc(AZeilen*BSpalten*sizeof(double));
       MPI_Recv(MA, ASpalten*AZeilen, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, &stat);
       MPI_Recv(MB, ASpalten*BSpalten, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, &stat);
       printf("%d: debug03\n", myid);
       // printf("%d: %f\n", myid, *(MA + _index(1, 1, ASpalten))); //funktioniert
    }

The Datatypes:

int ASpalten;
int AZeilen;
int BSpalten;
int *meta; //used to transfer meta data in 1 send
double *MA; //Matrix A
double *MB; //Matrix B

The program is supposed to multiply 2 matrices using MPI. My sample matrix proves that the code is likely valid, and I can also get this running for matrices up to about 130 * 90 (maybe more, maybe less), but as soon as the numbers increase, I get a possible deadlock: the console prints out "debug04" and that's it. I would be very grateful if anyone has a clue what is going wrong in my program. I already tried using MPI_INTEGER instead of MPI_INT, but there is no difference. Any help would be appreciated. The output of the console when using very tiny matrices (PS: I already tried executing my test cases in different orders and modified existing ones):

Testcase1 MPI:
0: debug04
0: debug05
0: debug06
0: debug04
1: debug01
1: debug02
0: debug05
1: debug03
1: debugx1
0: debug06
0: debug04......

Upvotes: 0

Views: 105

Answers (1)

francis
francis

Reputation: 9817

It seems that process 0 sends messages to proc 0, but proc 0 never receives them: the receive branch only runs on ranks other than 0, so those blocking sends eventually stall.

I modified the loop to

  for(i=1;i<numprocs;i++)

so that rank 0 no longer sends to itself, which removes the deadlock.

#include <stdio.h>
#include <time.h>
#include <stdlib.h>
#include <string.h>
#include "mpi.h"


int ASpalten;  // columns of A (and rows of B, per the allocation sizes below)
int AZeilen;   // rows of A
int BSpalten;  // columns of B
int *meta; //used to transfer meta data (the 3 dimensions) in 1 send
double *MA; //Matrix A
double *MB; //Matrix B
double *MR; //Matrix R (the AZeilen x BSpalten result buffer)

/*
 * Distribute the matrix dimensions and the matrices A and B from rank 0
 * to every other rank (blocking point-to-point sends/receives).
 *
 * myid     - this process's rank in MPI_COMM_WORLD
 * numprocs - total number of ranks
 *
 * Side effects: on worker ranks, replaces the global MA/MB/MR buffers
 * with allocations sized from the dimensions received from rank 0, and
 * updates the global ASpalten/AZeilen/BSpalten.
 */
void process(int myid, int numprocs){
    int i;
    int TAG = 0;
    MPI_Status stat;

    /* One small buffer carries all three dimensions in a single message. */
    meta = (int*)malloc(3 * sizeof(int));
    if (meta == NULL) {
        fprintf(stderr, "%d: malloc failed for meta\n", myid);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    if (myid == 0) {
        meta[0] = ASpalten;
        meta[1] = AZeilen;
        meta[2] = BSpalten;
        /* Start at i = 1: rank 0 must not send to itself, because it never
         * posts the matching receives. A blocking self-send only "works"
         * while messages fit the eager-protocol buffering; larger matrices
         * switch to rendezvous and deadlock. */
        for (i = 1; i < numprocs; i++) { // masternode distributes A and B to every worker
            MPI_Send(&meta[0], 3, MPI_INT, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug04\n", myid);
            MPI_Send(&MA[0], ASpalten*AZeilen, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug05\n", myid);
            MPI_Send(&MB[0], ASpalten*BSpalten, MPI_DOUBLE, i, TAG, MPI_COMM_WORLD);
            printf("%d: debug06\n", myid);
        }
    }
    else
    {
        MPI_Recv(meta, 3, MPI_INT, 0, TAG, MPI_COMM_WORLD, &stat);
        printf("%d: debug01\n", myid);
        ASpalten = meta[0];
        AZeilen  = meta[1];
        BSpalten = meta[2];
        printf("%d: debug02\n", myid);

        /* main() already allocated MA/MB/MR on every rank; free those
         * buffers before re-allocating with the dimensions received from
         * rank 0, otherwise the original allocations leak. */
        free(MA);
        free(MB);
        free(MR);
        MA = (double*)malloc((size_t)ASpalten * AZeilen  * sizeof(double));
        MB = (double*)malloc((size_t)ASpalten * BSpalten * sizeof(double));
        MR = (double*)malloc((size_t)AZeilen  * BSpalten * sizeof(double));
        if (MA == NULL || MB == NULL || MR == NULL) {
            fprintf(stderr, "%d: malloc failed for matrices\n", myid);
            MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
        }

        MPI_Recv(MA, ASpalten*AZeilen, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, &stat);
        MPI_Recv(MB, ASpalten*BSpalten, MPI_DOUBLE, 0, TAG, MPI_COMM_WORLD, &stat);
        printf("%d: debug03\n", myid);
        // printf("%d: %f\n", myid, *(MA + _index(1, 1, ASpalten))); //funktioniert
    }

    /* The dimensions have been copied into the globals; release meta. */
    free(meta);
    meta = NULL;
}

/*
 * Test driver: initialize MPI, set up test matrix dimensions, allocate
 * the global matrices on every rank, run the distribution step, then
 * clean up and shut MPI down.
 */
int main(int argc,char *argv[])
{
    int rank, size;

    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Test dimensions: A is AZeilen x ASpalten, B is ASpalten x BSpalten. */
    ASpalten = 130;
    AZeilen  = 90;
    BSpalten = 200;

    /* calloc zero-initializes the buffers, so rank 0 sends defined values
     * instead of the indeterminate bytes a plain malloc would leave, and
     * it also checks the count * size multiplication for overflow. */
    MA = (double*)calloc((size_t)ASpalten * AZeilen,  sizeof(double));
    MB = (double*)calloc((size_t)ASpalten * BSpalten, sizeof(double));
    MR = (double*)calloc((size_t)AZeilen  * BSpalten, sizeof(double));
    if (MA == NULL || MB == NULL || MR == NULL) {
        fprintf(stderr, "%d: matrix allocation failed\n", rank);
        MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
    }

    process(rank, size);

    /* Release the matrix buffers before shutting MPI down. */
    free(MA);
    free(MB);
    free(MR);

    MPI_Finalize();
    return 0;
}

Bye,

Francis

Upvotes: 1

Related Questions