Reputation: 341
I'm using MPI to parallelize a loop over [0, max]. I want a master process (let's say process 0) to initially divide that loop into small sets of n tasks (n iterations each), and then progressively assign a set of tasks to each of the x slave processes whenever one of them finishes its previous set. In other words, I would like to implement a load-balancing algorithm using MPI's blocking and/or non-blocking Send/Receive, but I have no idea how to proceed. Also, is there a way to find the optimal size of a set of tasks (the n parameter) as a function of max and x?
Thanks a lot for your help.
Upvotes: 3
Views: 4679
Reputation: 341
I've finally found the following code from here, which is basically a skeleton for dynamic load balancing based on the MPI master/slave model; it is exactly what I was looking for. I still can't see how to optimally divide the initial work set, though.
#include <mpi.h>
#define WORKTAG 1
#define DIETAG 2
/* Forward declarations for the master and slave routines defined below. */
static void master(void);
static void slave(void);

int main(int argc, char *argv[])
{
    int myrank;

    MPI_Init(&argc, &argv);             /* initialize MPI */
    MPI_Comm_rank(MPI_COMM_WORLD,       /* always use this communicator */
                  &myrank);             /* process rank, 0 thru N-1 */
    if (myrank == 0) {
        master();
    } else {
        slave();
    }
    MPI_Finalize();                     /* clean up MPI */
    return 0;
}
static void master(void)
{
    int ntasks, rank, work;
    double result;
    MPI_Status status;

    MPI_Comm_size(MPI_COMM_WORLD,       /* always use this communicator */
                  &ntasks);             /* #processes in the application */

    /*
     * Seed the slaves: give every slave one initial piece of work.
     */
    for (rank = 1; rank < ntasks; ++rank) {
        work = /* get_next_work_request */;
        MPI_Send(&work,                 /* message buffer */
                 1,                     /* one data item */
                 MPI_INT,               /* data item is an integer */
                 rank,                  /* destination process rank */
                 WORKTAG,               /* user-chosen message tag */
                 MPI_COMM_WORLD);       /* always use this communicator */
    }
    /*
     * Receive a result from any slave and dispatch a new work
     * request until all work requests have been exhausted.
     */
    work = /* get_next_work_request */;
    while (/* valid new work request */) {
        MPI_Recv(&result,               /* message buffer */
                 1,                     /* one data item */
                 MPI_DOUBLE,            /* of type double real */
                 MPI_ANY_SOURCE,        /* receive from any sender */
                 MPI_ANY_TAG,           /* any type of message */
                 MPI_COMM_WORLD,        /* always use this communicator */
                 &status);              /* received message info */
        MPI_Send(&work, 1, MPI_INT, status.MPI_SOURCE,
                 WORKTAG, MPI_COMM_WORLD);
        work = /* get_next_work_request */;
    }

    /*
     * Receive results for the outstanding work requests.
     */
    for (rank = 1; rank < ntasks; ++rank) {
        MPI_Recv(&result, 1, MPI_DOUBLE, MPI_ANY_SOURCE,
                 MPI_ANY_TAG, MPI_COMM_WORLD, &status);
    }

    /*
     * Tell all the slaves to exit by sending an empty message with DIETAG.
     */
    for (rank = 1; rank < ntasks; ++rank) {
        MPI_Send(0, 0, MPI_INT, rank, DIETAG, MPI_COMM_WORLD);
    }
}
static void slave(void)
{
    double result;
    int work;
    MPI_Status status;

    for (;;) {
        MPI_Recv(&work, 1, MPI_INT, 0, MPI_ANY_TAG,
                 MPI_COMM_WORLD, &status);
        /*
         * Check the tag of the received message.
         */
        if (status.MPI_TAG == DIETAG) {
            return;
        }
        result = /* do the work */;
        MPI_Send(&result, 1, MPI_DOUBLE, 0, 0, MPI_COMM_WORLD);
    }
}
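As for choosing the size n of a work set: there is no universal formula, and the skeleton above does not give one. A common rule of thumb is to pick chunks small enough that each of the x slaves gets several of them over the run (so one slow slave cannot stall everything with a single oversized chunk), yet large enough that a chunk's compute time dwarfs the cost of one Send/Recv round trip. Below is a minimal, hypothetical sketch of that heuristic; chunk_size, chunks_per_worker and the Chunk struct are illustrative names of my own, not part of the skeleton, and the tuning factor would normally be chosen by experiment.

#include <stdio.h>

/* Hypothetical chunk descriptor: a slave would iterate over [start, end). */
typedef struct {
    int start;
    int end;
} Chunk;

/* Heuristic chunk size: split max iterations among x slaves so that each
 * slave receives roughly chunks_per_worker chunks over the whole run.
 * A larger chunks_per_worker gives finer load balancing but more messages. */
static int chunk_size(int max, int x, int chunks_per_worker)
{
    int n = max / (x * chunks_per_worker);
    return n > 0 ? n : 1;               /* never hand out an empty chunk */
}

int main(void)
{
    int max = 1000;                     /* total loop iterations */
    int x = 4;                          /* number of slave processes */
    int n = chunk_size(max, x, 8);
    Chunk c;

    /* The master would generate chunks like this and, instead of a single
     * int, send the two ints of each Chunk (count 2, type MPI_INT). */
    for (c.start = 0; c.start < max; c.start += n) {
        c.end = (c.start + n < max) ? c.start + n : max;
        printf("chunk [%d, %d)\n", c.start, c.end);
    }
    return 0;
}

With such a descriptor, get_next_work_request in the skeleton would return the next unsent Chunk, and each slave would loop over its [start, end) range before reporting its result back to rank 0.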
Upvotes: 2