Walker

Reputation: 341

MPI : Percolate a subarray from slave processes to update main array in root/master process

I apologize for the rather long code. I am new to MPI, and I am facing issues when I try to parallelize a sorting algorithm. I basically have a huge array of ints, which I have to sort faster by dividing it into equal parts among slave processes. Once the slave processes are done, they have to return their sub-arrays to the root process (merged), which can do further processing. Hence, the solution has to be the global array patched with the semi-sorted sub-arrays. I have already tried looking at previous questions on the forum, but I have still been stuck the whole day. Please don't be too hard on me, since I am still new to MPI.

void Batcher( char *inputFile,   string outputFile, int N  ){

   int initialised;
   MPI_Initialized(&initialised);
   //int rank;
   if (!initialised)
   {
      MPI_Init(NULL, NULL);
      atexit(finalized);
   }
   //
   //BEGIN : FILE READING FOR ARRAY
   //
   //Get number of processes
   int world_size;
   int root = 0;
   MPI_Comm_size(MPI_COMM_WORLD, &world_size);
   N=world_size;
   //Get the rank of the process
   int rank;
   MPI_Comm_rank(MPI_COMM_WORLD , &rank);

   int *global_array;
   int *sub_array_per_process;
   int element_count;

   if ( rank == root ){
       // ifstream input;
       ofstream output;
       std::ifstream input( inputFile, std::ifstream::in);
       //get number of integers in the file so they may be populated into the array
       int counter=0;
       string line;
       while( std::getline(input,line)){
          if (line != "") ++counter;
       }

       int numbers[counter];
       global_array = new int [counter];
       global_array = &numbers[0];


       int current;
       int index = 0;

       //reset read pointer to beginning of input file
       input.clear();
       input.seekg(0, ios::beg);
      //get number from inputfile and add to array numbers 
       while( input >> current){
          numbers[index]=current;
          index++;
          // cout<<((int) a);
       }
      global_array = numbers;

      for(int i=0; i<counter; i++)
          global_array[i] = numbers[i];

       for(int i=0; i<counter; i++)
          cout<<"global "<< global_array[i]<< " numbers " <<numbers[i] <<endl;


       element_count = counter; 
       input.close();


       /* Send tasks to slaves */

        int NON = element_count / (N - 1 );
        for(int i=root+1; i< world_size ; i++){
           int start = get_preceeding_ranks(i )*NON;
           //     cout<<"start is "<< start <<endl;
           MPI_Send(&global_array[start], NON, MPI_INT,i, 1 ,MPI_COMM_WORLD);

       }


      MPI_Status status;
      int temp[counter];

   } // End root process operation

    MPI_Bcast(&element_count, 1, MPI_INT,root,MPI_COMM_WORLD );

    int NON = element_count / (N - 1 );
     //Receive local sub-array job from root
    if ( rank != root ){
       MPI_Status status;
       MPI_Recv(sub_array_per_process,NON, MPI_INT , root, 1 , MPI_COMM_WORLD ,&status );

    }
    int n_per_small_chunk = sizeof(sub_array_per_process) / sizeof(sub_array_per_process[0]) ;

    oddEvenMergeSort(sub_array_per_process,0, n_per_small_chunk);

    cout<<"After sorting processwise sorting.... "<<endl;
    if ( rank != root ){
         for(int i=0;i<NON;i++)
          cout<<"rank  : "<< rank <<"  data = "<< sub_array_per_process[ i] << endl;
    }
 //   MPI_Bcast(global_array, element_count , MPI_INT,root,MPI_COMM_WORLD );
 //sub_array_per_process = new int[2];

    MPI_Barrier(MPI_COMM_WORLD  ) ;

   if (rank == root){
       int start;
       int sender = -1;
       for (int i = 0; i < N; i++){
           start = get_preceeding_ranks(i+1) * NON;
           MPI_Status status;
           cout << " start = " << start << endl;
           sender = i + 1;
           MPI_Recv(&global_array[start], NON, MPI_INT, sender, 1, MPI_COMM_WORLD, &status);
           cout << " Received  " << global_array[start] << " from " << sender << endl;
           // MPI_Bcast(global_array, elem , MPI_INT,root,MPI_COMM_WORLD );
       }
       for (int j = 0; j < element_count; j++)
           cout << " Received  " << global_array[j] << " from " << sender << endl;
   }
   else {  //Send to root your sub-array..
       // for (int j=1 ; j < N; i++ )
       for (int i = 0; i < NON; i++)
           MPI_Send(&sub_array_per_process[i], NON, MPI_INT, 0, 1, MPI_COMM_WORLD);
   }

   MPI_Barrier(MPI_COMM_WORLD  ) ;

   for ( int j=0; j< element_count; j++ )  
     cout<<" iOutside  " << global_array[j] <<" from "<<   endl;



   MPI_Finalize();

}

int main() {
  string output;//Dummy

  char* inputFile ="inputsmall.txt";
  int N=0; //Dummy


  Batcher( inputFile,  output, N  );

  return 0 ;
}

Since the code is long, here is specifically where I am stuck:

   if (rank == root){
       int start;
       int sender = -1;
       for (int i = 0; i < N; i++){ //get all submitted sub-arrays from slaves
           start = get_preceeding_ranks(i+1) * NON;
           MPI_Status status;
           cout << " start = " << start << endl;
           sender = i + 1;
           MPI_Recv(&global_array[start], NON, MPI_INT, sender, 1, MPI_COMM_WORLD, &status);
           cout << " Received  " << global_array[start] << " from " << sender << endl;
       }
       for (int j = 0; j < element_count; j++)
           cout << " Received  " << global_array[j] << " from " << sender << endl;
   }
   else {  //Send to root your sub-array..
       for (int i = 0; i < NON; i++)
           MPI_Send(&sub_array_per_process[i], NON, MPI_INT, 0, 1, MPI_COMM_WORLD);
   }

The output I get is:

 start = 2
 Received  2 from 1
 start = 4
 Received  3 from 2
 start = 6
 Received  4 from 3
 start = 8
[tux2:25061] *** Process received signal ***
[tux2:25061] Signal: Segmentation fault (11)
[tux2:25061] Signal code:  (128)
[tux2:25061] Failing at address: (nil)
--------------------------------------------------------------------------
mpirun noticed that process rank 0 with PID 25061 on node tux2 exited on signal 11 (Segmentation fault).
--------------------------------------------------------------------------

Upvotes: 3

Views: 521

Answers (1)

Pooja Nilangekar

Reputation: 1479

From your code I can see that when the rank is equal to root, you're receiving the sorted arrays from the slave processes. The problem lies in your for loop. You're trying to receive from all processes from i+1 to N, while your communicator has only N processes (this is because you've assigned N = world_size). Therefore your slaves are 1..N-1. Hence you need to change the for statement as follows:

for ( int i= 0; i< (N-1); i++){ //To receive data from process i=1 to i=N-1
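Applied to the receive loop in the question, that change would look roughly like this (a sketch only, keeping the question's NON chunk size and its get_preceeding_ranks helper):

if (rank == root) {
    for (int i = 0; i < N - 1; i++) {   // slaves are ranks 1 .. N-1
        int sender = i + 1;
        int start = get_preceeding_ranks(sender) * NON;
        MPI_Status status;
        MPI_Recv(&global_array[start], NON, MPI_INT, sender, 1,
                 MPI_COMM_WORLD, &status);
    }
}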

On another note, your code is difficult to debug and manage. You will find it easier to use MPI_Scatter and MPI_Gather instead. Take a look at this tutorial.
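For reference, a minimal sketch of that Scatter/Gather pattern (not the asker's code: it assumes element_count divides evenly by world_size, lets the root rank sort a chunk as well, and uses std::sort where the question uses oddEvenMergeSort):

#include <mpi.h>
#include <algorithm>
#include <vector>

void scatter_sort_gather(std::vector<int> &global_array, int element_count)
{
    int rank, world_size;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &world_size);

    int chunk = element_count / world_size;   // equal share per rank (assumes even division)
    std::vector<int> local(chunk);

    // Split root's global_array evenly across all ranks, root included.
    MPI_Scatter(global_array.data(), chunk, MPI_INT,
                local.data(), chunk, MPI_INT, 0, MPI_COMM_WORLD);

    // Sort the local chunk (oddEvenMergeSort could be used here instead).
    std::sort(local.begin(), local.end());

    // Collect every rank's sorted chunk back into its slot of global_array on root.
    MPI_Gather(local.data(), chunk, MPI_INT,
               global_array.data(), chunk, MPI_INT, 0, MPI_COMM_WORLD);
}

This removes the manual start-offset bookkeeping and the per-element MPI_Send loop on the slave side.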

Upvotes: 3
