footman
footman

Reputation: 33

Segmentation fault after scatterv

     /**
     * BLOCK_LOW
     * Returns the offset of a local array
     * with regards to block decomposition
     * of a global array.
     *
     * @param  (int) process rank
     * @param  (int) total number of processes
     * @param  (int) size of global array
     * @return (int) offset of local array in global array
     */
    #define BLOCK_LOW(id, p, n) ((id)*(n)/(p))

    /**
     * BLOCK_HIGH
     * Returns the index immediately after the
     * end of a local array with regards to
     * block decomposition of a global array.
     *
     * @param  (int) process rank
     * @param  (int) total number of processes
     * @param  (int) size of global array
     * @return (int) offset after end of local array
     */
    #define BLOCK_HIGH(id, p, n) (BLOCK_LOW((id)+1, (p), (n)))

    /**
     * BLOCK_SIZE
     * Returns the size of a local array
     * with regards to block decomposition
     * of a global array.
     *
     * @param  (int) process rank
     * @param  (int) total number of processes
     * @param  (int) size of global array
     * @return (int) size of local array
     */
    #define BLOCK_SIZE(id, p, n) ((BLOCK_HIGH((id), (p), (n))) - (BLOCK_LOW((id), (p), (n))))

    /**
     * BLOCK_OWNER
     * Returns the rank of the process that
     * handles a certain local array with
     * regards to block decomposition of a
     * global array.
     *
     * @param  (int) index in global array
     * @param  (int) total number of processes
     * @param  (int) size of global array
     * @return (int) rank of process that handles index
     */
    #define BLOCK_OWNER(i, p, n) (((p)*((i)+1)-1)/(n))



    /*Matricefilenames:
      small matrix A.bin of dimension 100 × 50
      small matrix B.bin of dimension 50 × 100
      large matrix A.bin of dimension 1000 × 500
      large matrix B.bin of dimension 500 × 1000

    An MPI program should be implemented such that it can
    • accept two file names at run-time,
    • let process 0 read the A and B matrices from the two data files,
    • let process 0 distribute the pieces of A and B to all the other processes,
    • involve all the processes to carry out the the chosen parallel algorithm
    for matrix multiplication C = A * B ,
    • let process 0 gather, from all the other processes, the different pieces
    of C ,
    • let process 0 write out the entire C matrix to a data file.
    */


    #include <stdio.h>
    #include <stdlib.h>
    #include <mpi.h>
    #include "mpi-utils.c"
    void read_matrix_binaryformat (char*, double***, int*, int*);
    void write_matrix_binaryformat (char*, double**, int, int);
    void create_matrix (double***,int,int);
    void matrix_multiplication (double ***, double ***, double ***,int,int, int);

    int main(int argc, char *argv[]) {
        int id,p; // Process rank and total amount of processes
        int rowsA, colsA, rowsB, colsB; // Matrix dimensions
        double **A; // Matrix A
        double **B; // Matrix B
        double **C; // Result matrix C : AB
        int local_rows; // Local row dimension of the matrix A
        double **local_A; // The local A matrix
        double **local_C;  // The local C matrix

        MPI_Init (&argc, &argv);
        MPI_Comm_rank (MPI_COMM_WORLD, &id);
        MPI_Comm_size (MPI_COMM_WORLD, &p);

        if(argc != 3) {
            if(id == 0) {
                printf("Usage:\n>> %s matrix_A matrix_B\n",argv[0]);
            }       
            MPI_Finalize();
            exit(1);
        }

        if (id == 0) {
            read_matrix_binaryformat (argv[1], &A, &rowsA, &colsA);
            read_matrix_binaryformat (argv[2], &B, &rowsB, &colsB);
        }

        if (p == 1) {
            create_matrix(&C,rowsA,colsB);
            matrix_multiplication (&A,&B,&C,rowsA,colsB,colsA);

            char* filename = "matrix_C.bin";
            write_matrix_binaryformat (filename, C, rowsA, colsB);
            free(A);
            free(B);
            free(C);
            MPI_Finalize();
            return 0;
        }


        // For this assignment we have chosen to bcast the whole matrix B:
        MPI_Bcast (&B, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); 
        MPI_Bcast (&colsA, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast (&colsB, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast (&rowsA, 1, MPI_INT, 0, MPI_COMM_WORLD);
        MPI_Bcast (&rowsB, 1, MPI_INT, 0, MPI_COMM_WORLD);

        local_rows = BLOCK_SIZE(id, p, rowsA);


        /*    SCATTER VALUES    */

        int *proc_elements = (int*)malloc(p*sizeof(int)); // amount of elements for each processor
        int *displace = (int*)malloc(p*sizeof(int)); // displacement of elements for each processor
        int i;
        for (i = 0; i<p; i++) {
            proc_elements[i] = BLOCK_SIZE(i, p, rowsA)*colsA;
            displace[i] = BLOCK_LOW(i, p, rowsA)*colsA;
        }

        create_matrix(&local_A,local_rows,colsA);

        MPI_Scatterv(&A[0],&proc_elements[0],&displace[0],MPI_DOUBLE,&local_A[0],
                     local_rows*colsA,MPI_DOUBLE,0,MPI_COMM_WORLD);

        /*    END  SCATTER  VALUES  */  

        create_matrix (&local_C,local_rows,colsB);
        matrix_multiplication (&local_A,&B,&local_C,local_rows,colsB,colsA);

        /*    GATHER VALUES    */

        MPI_Gatherv(&local_C[0], rowsA*colsB, MPI_DOUBLE,&C[0],
              &proc_elements[0],&displace[0],MPI_DOUBLE,0, MPI_COMM_WORLD);

        /*    END  GATHER VALUES  */

        char* filename = "matrix_C.bin";
        write_matrix_binaryformat (filename, C, rowsA, colsB);  

        free (proc_elements);
        free (displace);    
        free (local_A);
        free (local_C);
        free (A);
        free (B);
        free (C);   
        MPI_Finalize ();
        return 0;
    }

    void create_matrix (double ***C,int rows,int cols) {
        *C = (double**)malloc(rows*sizeof(double*));
        (*C)[0] = (double*)malloc(rows*cols*sizeof(double));
        int i;
        for (i=1; i<rows; i++)
            (*C)[i] = (*C)[i-1] + cols;
    }

    void matrix_multiplication (double ***A, double ***B, double ***C, int rowsC,int colsC,int colsA) {
        double sum;
        int i,j,k;
        for (i = 0; i < rowsC; i++) {
            for (j = 0; j < colsC; j++) {
                sum = 0.0;
                for (k = 0; k < colsA; k++) {
                    sum = sum + (*A)[i][k]*(*B)[k][j];
                }
                (*C)[i][j] = sum;
            }
        }
    }

    /* Reads a 2D array from a binary file*/ 
    void read_matrix_binaryformat (char* filename, double*** matrix, int* num_rows, int* num_cols) {
        int i;
        FILE* fp = fopen (filename,"rb");
        fread (num_rows, sizeof(int), 1, fp);
        fread (num_cols, sizeof(int), 1, fp);
        /* storage allocation of the matrix */
        *matrix = (double**)malloc((*num_rows)*sizeof(double*));
        (*matrix)[0] = (double*)malloc((*num_rows)*(*num_cols)*sizeof(double));
        for (i=1; i<(*num_rows); i++)
            (*matrix)[i] = (*matrix)[i-1]+(*num_cols);
        /* read in the entire matrix */
        fread ((*matrix)[0], sizeof(double), (*num_rows)*(*num_cols), fp);
        fclose (fp);
    }

    /* Writes a 2D array in a binary file */
    void write_matrix_binaryformat (char* filename, double** matrix, int num_rows, int num_cols) {
      FILE *fp = fopen (filename,"wb");
      fwrite (&num_rows, sizeof(int), 1, fp);
      fwrite (&num_cols, sizeof(int), 1, fp);
      fwrite (matrix[0], sizeof(double), num_rows*num_cols, fp);
      fclose (fp);
    }

My task is to do a parallel matrix multiplication of matrix A and B and gather the results in matrix C.

I am doing this by dividing matrix A in rowwise pieces and each process is going to use its piece to multiply matrix B, and get back its piece from the multiplication. Then I am going to gather all the pieces from the processes and put them together to matrix C.

I allready posted a similiar question, but this code is improved and I have progressed but I am still getting a segmentation fault after the scatterv call.

Upvotes: 2

Views: 831

Answers (2)

Hristo Iliev
Hristo Iliev

Reputation: 74395

Consider this an extended comment as Jonathan Dursi has already given a fairly elaborate answer. You matrices are really represented in a weird way but at least you followed the advice given to your other question and allocate space for them as contiguous blocks and not separately for each row.

Given that, you should replace:

MPI_Scatterv(&A[0],&proc_elements[0],&displace[0],MPI_DOUBLE,&local_A[0],
             local_rows*colsA,MPI_DOUBLE,0,MPI_COMM_WORLD);

with

MPI_Scatterv(A[0],&proc_elements[0],&displace[0],MPI_DOUBLE,local_A[0],
             local_rows*colsA,MPI_DOUBLE,0,MPI_COMM_WORLD);

A[0] already points to the beginning of the matrix data and there is no need to make a pointer to it. The same goes for local_A[0] as well as for the parameters to the MPI_Gatherv() call.

It has been said many times already - MPI doesn't do pointer chasing and only works with flat buffers.

I've also noticed another mistake in your code - memory for your matrices is not freed correctly. You are only freeing the array of pointers and not the matrix data itself:

free(A);

should really become

free(A[0]); free(A);

Upvotes: 2

Jonathan Dursi
Jonathan Dursi

Reputation: 50927

So I see a few problems right away:

    MPI_Bcast (&B, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD); 

Here, you're passing not a pointer to doubles, but a pointer to a pointer to a pointer to a double (B is defined as double **B) and you're telling MPI to follow that pointer and send 1 double from there. That is not going to work.

You might think that what you're accomplishing here is sending the pointer to the matrix, from which all tasks can read the array -- that doesn't work. The processes don't share a common memory space (that's why MPI is called distributed memory programming) and the pointer doesn't go anywhere. You're actually going to have to send the contents of the matrix,

    MPI_Bcast (&(B[0][0]), rowsB*colsB, MPI_DOUBLE, 0, MPI_COMM_WORLD); 

and you're going to have to make sure the other processes have correctly allocated memory for the B matrix ahead of time.

There's similar pointer problems elsewhere:

    MPI_Scatterv(&A[0], ..., &local_A[0]

Again, A is a pointer to a pointer to doubles (double **A) as is local_A, and you need to be pointing MPI to pointer to doubles for this to work, something like

    MPI_Scatterv(&(A[0][0]), ..., &(local_A[0][0])

that error seems to be present in all the communications routines.

Remember that anything that looks like (buffer, count, TYPE) in MPI means that the MPI routines follow the pointer buffer and send the next count pieces of data of type TYPE there. MPI can't follow pointers within the buffer you sent becaue in general it doens't know they're there. It just takes the next (count * sizeof(TYPE)) bytes from pointer buffer and does whatever communications is appropriate with them. So you have to pass it a pointer to a stream of data of type TYPE.

Having said all that, it would be a lot easier to work with you on this if you had narrowed things down a bit; right now the program you've posted includes a lot of I/O stuff that's irrelevant, and it means that no one can just run your program to see what happens without first figuring out the matrix format and then generating two matrices on their own. When posting a question about source code, you really want to post a (a) small bit of source which (b) reproduces the problem and (c) is completely self-contained.

Upvotes: 3

Related Questions