JACK M

Reputation: 2841

MPI: how to distinguish send and recv in MPI_Wait

Let's say I use PMPI to write a wrapper for MPI_Wait, which waits for an MPI send or receive to complete.

/* ================== C Wrappers for MPI_Wait ================== */
_EXTERN_C_ int PMPI_Wait(MPI_Request *request, MPI_Status *status);
_EXTERN_C_ int MPI_Wait(MPI_Request *request, MPI_Status *status) {
    int _wrap_py_return_val = 0;

    _wrap_py_return_val = PMPI_Wait(request, status);
    return _wrap_py_return_val;
}

The wrapper is generated by a PMPI wrapper-generator tool.

What I would like to do is:

/* ================== C Wrappers for MPI_Wait ================== */
_EXTERN_C_ int PMPI_Wait(MPI_Request *request, MPI_Status *status);
_EXTERN_C_ int MPI_Wait(MPI_Request *request, MPI_Status *status) {
    int _wrap_py_return_val = 0;

    if (/* request is a send request */)
        printf("send\n");
    else /* request is a recv request */
        printf("recv\n");

    _wrap_py_return_val = PMPI_Wait(request, status);
    return _wrap_py_return_val;
}

How can I distinguish between a send and a receive request in Open MPI? Let's say I use Open MPI 3.0.0.

Upvotes: 3

Views: 697

Answers (1)

PilouPili

Reputation: 2699

I think that since MPI_Request is opaque (in several implementations it is just an int or a pointer), your only option is to track the created MPI_Request objects yourself.

Here is a proposal (it is C++-oriented, because that is the way I like it):

#include <mpi.h>
#include <iostream>
#include <map>
#include <string>   // std::string values stored in the map
#include <cstring>  // memcpy
// Wraps the raw bytes of an opaque MPI_Request so they can be ordered and used as a std::map key
struct RequestConverter
{
      char data[sizeof(MPI_Request)];
      RequestConverter(MPI_Request * mpi_request)
      {
            memcpy(data, mpi_request, sizeof(MPI_Request));
      }
      RequestConverter()
      { }
      RequestConverter(const RequestConverter & req)
      {
            memcpy(data, req.data, sizeof(MPI_Request));
      }
      RequestConverter & operator=(const RequestConverter & req)
      {
            memcpy(data, req.data, sizeof(MPI_Request));
            return *this;
      }
      bool operator<(const RequestConverter & request) const
      {
            for(size_t i=0; i<sizeof(MPI_Request); i++)
            {
                  if(data[i]!=request.data[i])
                  {
                        return data[i]<request.data[i];
                  }
            }
            return false;
      }
};
//To store the created MPI_Request
std::map<RequestConverter, std::string> request_holder;

extern "C"
{

int MPI_Isend(
  const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request
)
{
      int ier = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);
      request_holder[RequestConverter(request)] = "sending";  // remember this request came from a send
      return ier;
}


int MPI_Irecv(
  void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request
)
{
      int ier = PMPI_Irecv(buf, count, datatype, source, tag, comm, request);
      request_holder[RequestConverter(request)] = "receiving";  // remember this request came from a receive
      return ier;
}

int MPI_Wait(
  MPI_Request *request,
  MPI_Status *status
)
{
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      std::cout << "waiting(" << myid << ")-> " << request_holder[RequestConverter(request)] << std::endl;
      // Erase the entry before PMPI_Wait overwrites the handle with MPI_REQUEST_NULL.
      request_holder.erase(RequestConverter(request));

      return PMPI_Wait(request, status);
}


}

RequestConverter is just a way of imposing an ordering on the opaque MPI_Request bytes so they can be used as std::map keys (a simpler alternative is sketched just below).
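If you are willing to assume that MPI_Request is a scalar handle (it is a pointer in Open MPI and an int in MPICH; the standard does not guarantee this), a minimal sketch can key the map on the handle value directly and drop RequestConverter entirely:

#include <mpi.h>
#include <iostream>
#include <map>
#include <string>

// Assumes MPI_Request is a scalar handle that std::less can order
// (true for Open MPI's pointer handles and MPICH's int handles).
static std::map<MPI_Request, std::string> request_kind;

extern "C"
{

int MPI_Isend(
  const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request
)
{
      int ier = PMPI_Isend(buf, count, datatype, dest, tag, comm, request);
      request_kind[*request] = "sending";  // the handle value itself is the key
      return ier;
}

int MPI_Wait(MPI_Request *request, MPI_Status *status)
{
      std::cout << "waiting-> " << request_kind[*request] << std::endl;
      request_kind.erase(*request);  // erase before PMPI_Wait nulls the handle
      return PMPI_Wait(request, status);
}

}

The byte-copying RequestConverter buys portability: it still works if MPI_Request turns out to be a struct, at the price of a few extra copies.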

MPI_Isend and MPI_Irecv store the request in the global map, and MPI_Wait looks the request up and deletes it from the std::map. The other completion calls (MPI_Test, MPI_Waitall, ...) need the same treatment; see the sketch below.
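As an illustration (my addition, not part of the original answer), here is what the MPI_Waitall case could look like. It is meant to live inside the same extern "C" block and reuses request_holder and RequestConverter from above:

int MPI_Waitall(
  int count, MPI_Request array_of_requests[], MPI_Status array_of_statuses[]
)
{
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      for(int i = 0; i < count; i++)
      {
            // Look up and drop each request before PMPI_Waitall overwrites
            // the handles with MPI_REQUEST_NULL.
            std::cout << "waiting(" << myid << ")-> "
                      << request_holder[RequestConverter(&array_of_requests[i])] << std::endl;
            request_holder.erase(RequestConverter(&array_of_requests[i]));
      }
      return PMPI_Waitall(count, array_of_requests, array_of_statuses);
}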

A simple test gives:

int main(int argc, char **argv)
{
      int myid, numprocs;
      MPI_Init(&argc, &argv);
      MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);

      int i = 123456789;
      MPI_Request request;
      MPI_Status status;
      if(myid == 0)
      {
            MPI_Isend(&i, 1, MPI_INT, 1, 44444, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            std::cout << myid << ' ' << i << std::endl;
      }
      else if(myid == 1)
      {
            MPI_Irecv(&i, 1, MPI_INT, 0, 44444, MPI_COMM_WORLD, &request);
            MPI_Wait(&request, &status);
            std::cout << myid << ' ' << i << std::endl;
      }
      int *sb = new int[numprocs];
      for(int p = 0; p < numprocs; p++) { sb[p] = (myid + 1) * (p + 1); }
      int *rb = new int[numprocs];
      MPI_Alltoall(sb, 1, MPI_INT, rb, 1, MPI_INT, MPI_COMM_WORLD);
      delete[] sb;
      delete[] rb;
      MPI_Finalize();
}

Output:

waiting(0)-> sending
0 123456789
waiting(1)-> receiving
1 123456789

However, I added the MPI_Alltoall call to check whether collective operations go through the wrapped point-to-point functions, and they do not: only the PMPI internals get called, so the requests they create never show up in the map. No miracle there.
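If collectives matter for your tracing too, the only way with this approach is to wrap each collective explicitly. A minimal sketch for MPI_Alltoall (my addition, same assumptions as above):

int MPI_Alltoall(
  const void *sendbuf, int sendcount, MPI_Datatype sendtype,
  void *recvbuf, int recvcount, MPI_Datatype recvtype, MPI_Comm comm
)
{
      int myid;
      MPI_Comm_rank(MPI_COMM_WORLD, &myid);
      std::cout << "alltoall(" << myid << ")" << std::endl;  // log the collective itself
      return PMPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype, comm);
}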

Upvotes: 1
