Reputation: 131
Good evening,
I'm attending a parallel programming course. The teacher gave us an assignment that involves domain partitioning for stencil calculations. For this type of calculation (finite differences), the most common way to parallelize the code is to partition the domain and create ghost zones (halos).
To better understand the creation of ghost zones in MPI, I programmed this simple example that initializes some arrays with inner values = 123 and ghost values = 8. At the end of all the communication, all ghost values should remain 8. On one node I get the 123 values:
Serial (no ghosts):
123 - 123 - ... - 123 - 123
Two partitions:
123 - 123 - ... - 8 ||| 8 - ... - 123 - 123
Three partitions:
123 - 123 - ... - 8 ||| 8 - ... - 123 - 123 - 8 ||| 8 - ... - 123 - 123
Aside from this bug, the main question here is about the correct approach to create and keep ghost zones updated. Is there a cleaner solution than my messy if(myid == ...) else if(myid == ...) else type of implementation? How do people usually implement this kind of parallelism?
#include<mpi.h>
#include<stdio.h>
#include<stdlib.h>
int WhichSize(int mpiId, int numProc, int tam);
int main(int argc, char *argv[]){
int i;
int localSize;
int numProc;
int myid;
int leftProc;
int rightProc;
int * myArray;
int fullDomainSize = 16;
MPI_Request request;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numProc);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
// Lets get each partition size.
localSize = WhichSize(myid, numProc, fullDomainSize);
// Allocate arrays acording to proc number.
if(numProc == 1){
//printf("Allocating Array for serial usage\n");
myArray = (int*)malloc(localSize*sizeof(int));
} else if(numProc == 2) {
//printf("Allocating Array for 2 proc usage\n");
myArray = (int*)malloc((localSize+ 1)*sizeof(int));
} else if(numProc > 2) {
if (myid == 0 || myid == numProc - 1){
//printf("Allocating array for boundary nodes usage\n");
myArray = (int*)malloc((localSize+ 1)*sizeof(int));
} else {
//printf("Allocating array for inner nodes usage\n");
myArray = (int*)malloc((localSize+ 2)*sizeof(int));
}
}
// Now we will fill the arrays with a dummy value 123. For the
// boundaries (ghosts) we will fill them with 8, just to differentiate.
if(numProc == 1){
//printf("----------------------------------------\n");
//printf("Filling the serial array with values... \n");
for (i = 0; i<localSize; i++){
myArray[i] = 123;
}
} else if(numProc == 2) {
////printf("------------------------------------------------\n");
//printf("Filling array for two proc usage with values... \n");
for (i = 0; i<localSize; i++){
myArray[i] = 123;
}
// ghost.
myArray[localSize+1] = 8;
} else if(numProc > 2) {
if (myid == 0 || myid == numProc - 1){
//printf("--------------------------------------------------\n");
//printf("Filling boundary node arrays usage with values... \n");
for (i = 0; i<localSize; i++){
myArray[i] = 123;
}
// ghosts.
myArray[localSize+1] = 8;
} else {
//printf("--------------------------------------------------\n");
//printf("Filling inner node arrays usage with values... \n");
for (i = 0; i<localSize; i++){
myArray[i] = 123;
}
// ghosts.
myArray[localSize+1] = 8;
myArray[0] = 8;
}
}
// Now lets comunicate the ghosts with MPI_Sendrecv().
if(numProc == 1){
//printf("Serial usage, no ghost to comunicate \n");
} else if(numProc == 2) {
if (myid == 0){
//printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
MPI_Isend(&myArray[localSize+1],
1,
MPI_INT,
1,
12345,
MPI_COMM_WORLD,
&request);
} else if (myid == 1) {
//printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
MPI_Irecv(&myArray[localSize+1],
1,
MPI_INT,
0,
12345,
MPI_COMM_WORLD,
&request);
}
} else if(numProc > 2) {
if (myid == 0){
rightProc = myid + 1;
if (myid == 0){
//printf("-------------------------------\n");
//printf("Communicating Boundary ghosts !\n");
//printf("-------------------------------\n");
//printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
MPI_Isend(&myArray[localSize+1],
1,
MPI_INT,
rightProc,
12345,
MPI_COMM_WORLD,
&request);
} else if (myid == rightProc) {
//printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
MPI_Irecv(&myArray[localSize+1],
1,
MPI_INT,
0,
12345,
MPI_COMM_WORLD,
&request);
}
} else if (myid == numProc - 1) {
leftProc = myid - 1;
if (myid == numProc - 1){
//printf("-------------------------------\n");
//printf("Communicating Boundary ghosts !\n");
//printf("-------------------------------\n");
////printf("Sending ghost value from proc %d to %d\n", myid, myid + 1);
MPI_Isend(&myArray[localSize+1],
1,
MPI_INT,
leftProc,
12345,
MPI_COMM_WORLD,
&request);
} else if (myid == leftProc) {
rightProc = myid + 1;
//printf("Receiving ghost value from proc %d to %d\n", myid-1, myid);
MPI_Irecv(&myArray[localSize+1],
1,
MPI_INT,
rightProc,
12345,
MPI_COMM_WORLD,
&request);
}
} else {
//printf("-------------------------------\n");
//printf("Communicating Inner ghosts baby\n");
//printf("-------------------------------\n");
leftProc = myid - 1;
rightProc = myid + 1;
// Communicate tail ghost.
if (myid == leftProc) {
MPI_Isend(&myArray[localSize+1],
1,
MPI_INT,
rightProc,
12345,
MPI_COMM_WORLD,
&request);
} else if (myid == rightProc){
MPI_Irecv(&myArray[localSize+1],
1,
MPI_INT,
leftProc,
12345,
MPI_COMM_WORLD,
&request);
}
// Communicate head ghost.
if (myid == leftProc) {
MPI_Isend(&myArray[0],
1,
MPI_INT,
rightProc,
12345,
MPI_COMM_WORLD,
&request);
} else if (myid == rightProc){
MPI_Irecv(&myArray[0],
1,
MPI_INT,
leftProc,
12345,
MPI_COMM_WORLD,
&request);
}
}
}
// Now I Want to see if the ghosts are in place !.
if (myid == 0){
printf("The ghost value is: %d\n", myArray[localSize + 1]);
} else if (myid == numProc - 1){
printf("The ghost value is: %d\n", myArray[0]);
} else {
printf("The head ghost is: %d\n", myArray[0]);
printf("The tail ghost is: %d\n", myArray[localSize + 1]);
}
MPI_Finalize();
exit(0);
}
int WhichSize(int mpiId, int numProc, int tam){
double resto;
int tamLocal;
tamLocal = tam / numProc;
resto = tam - tamLocal*numProc;
if (mpiId < resto) tamLocal = tamLocal + 1;
return tamLocal;
}
Thank you guys!
Upvotes: 3
Views: 2160
Reputation: 74455
Halos can be elegantly implemented in MPI using Cartesian virtual topologies and the send-receive operation.
First of all, having lots of rank-dependent logic in conditional operators makes the code hard to read and understand. It is way better when the code is symmetric, i.e. when all ranks execute the same code. Corner cases can be taken care of using the MPI_PROC_NULL null rank - a send to or receive from that rank results in a no-op. It is therefore enough to do:
// Compute the rank of the left neighbour
leftProc = myid - 1;
if (leftProc < 0) leftProc = MPI_PROC_NULL;
// Compute the rank of the right neighbour
rightProc = myid + 1;
if (rightProc >= numProc) rightProc = MPI_PROC_NULL;
// Halo exchange in forward direction
MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 0,   // send last element to the right
             &myArray[0], 1, MPI_INT, leftProc, 0,            // receive into left halo
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Halo exchange in reverse direction
MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 0,            // send first element to the left
             &myArray[localSize+1], 1, MPI_INT, rightProc, 0, // receive into right halo
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
That code works for any rank, even for those at both ends - there either the source or the destination is the null rank and no actual transfer occurs in the corresponding direction. It also works with any number of MPI processes, from one to many. It requires that all ranks have halos on both sides, including those that don't really need it (the two corner ranks). One can store in those dummy halos useful things like boundary values (e.g. when solving PDEs) or simply live with the memory waste, which is usually negligible.
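For example, if the problem has fixed (Dirichlet) boundary conditions, the two corner ranks can write the boundary value into their otherwise unused halo cells once; since sends to and receives from MPI_PROC_NULL are no-ops, the exchange never overwrites them. A minimal sketch, where BOUNDARY_VAL is an illustrative constant and not part of the original code:
// Illustrative Dirichlet boundary value (not from the original code)
const int BOUNDARY_VAL = 88;
// The leftmost rank stores the left physical boundary in its left halo cell...
if (leftProc == MPI_PROC_NULL) myArray[0] = BOUNDARY_VAL;
// ...and the rightmost rank stores the right physical boundary in its right halo cell.
if (rightProc == MPI_PROC_NULL) myArray[localSize+1] = BOUNDARY_VAL;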
In your code, you use non-blocking operations incorrectly. Those are tricky and require care to be taken. MPI_Sendrecv could and should be used instead. It performs both the send and the receive operation at the same time and thus prevents deadlocks (as long as there is a matching receive for each send).
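If you do want non-blocking calls, e.g. to overlap the halo exchange with computation on cells that do not depend on the halos, the usual pattern is to post the receives and sends up front and wait on all requests before the halo values are read. A rough sketch of that pattern (not taken from the code above, assuming the symmetric layout with halos at myArray[0] and myArray[localSize+1]):
MPI_Request reqs[4];
// Post the receives into the two halo cells first.
MPI_Irecv(&myArray[0],           1, MPI_INT, leftProc,  0, MPI_COMM_WORLD, &reqs[0]);
MPI_Irecv(&myArray[localSize+1], 1, MPI_INT, rightProc, 1, MPI_COMM_WORLD, &reqs[1]);
// Then post the matching sends of the first and last interior cells.
MPI_Isend(&myArray[1],         1, MPI_INT, leftProc,  1, MPI_COMM_WORLD, &reqs[2]);
MPI_Isend(&myArray[localSize], 1, MPI_INT, rightProc, 0, MPI_COMM_WORLD, &reqs[3]);
// ... compute on interior cells that do not need halo data ...
// Complete all four operations before touching the halos.
MPI_Waitall(4, reqs, MPI_STATUSES_IGNORE);
Every MPI_Isend/MPI_Irecv must eventually be completed with a wait or test call; simply dropping the request, as in the code in the question, is erroneous.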
If the domain is periodic, then the rank computation logic becomes simply:
// Compute the rank of the left neighbour
leftProc = (myid - 1 + numProc) % numProc;
// Compute the rank of the right neighbour
rightProc = (myid + 1) % numProc;
Instead of doing the arithmetic, one could create a Cartesian virtual topology and then use MPI_Cart_shift to find the ranks of the two neighbours:
// Create a non-periodic 1-D Cartesian topology
int dims[1] = { numProc };
int periods[1] = { 0 }; // 0 - non-periodic, 1 - periodic
MPI_Comm cart_comm;
MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &cart_comm);
// Find the two neighbours
MPI_Cart_shift(cart_comm, 0, 1, &leftProc, &rightProc);
The code for the halo exchange remains the same, with the only difference that cart_comm should replace MPI_COMM_WORLD. MPI_Cart_shift automatically takes care of the corner cases and will return MPI_PROC_NULL when appropriate. The advantage of that method is that you can easily switch between non-periodic and periodic domains by simply flipping the values inside the periods[] array.
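For instance, only the initialization of periods[] has to change to get a periodic ring; MPI_Cart_shift then returns the wrapped-around neighbours instead of MPI_PROC_NULL for the two end ranks. A small sketch of that variant (ring_comm is just an illustrative name):
// Create a periodic 1-D Cartesian topology (a ring of numProc processes)
int dims[1]    = { numProc };
int periods[1] = { 1 }; // 1 - periodic
MPI_Comm ring_comm;
MPI_Cart_create(MPI_COMM_WORLD, 1, dims, periods, 1, &ring_comm);
// With a periodic topology even the two end positions of the ring get
// real neighbours, so no MPI_PROC_NULL is returned.
MPI_Cart_shift(ring_comm, 0, 1, &leftProc, &rightProc);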
The halos have to be updated as often as necessary, which depends on the algorithm. With most iterative schemes, the update must happen at the beginning of each iteration. One could reduce the communication frequency by introducing multi-level halos and using the values in the outer levels to compute the values in the inner ones.
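As a rough illustration of the multi-level idea, the same send-receive pair can move a halo of width W in one go; W is an illustrative constant, not part of the code above, and the array would then need localSize + 2*W elements with the interior at indices W .. W+localSize-1:
// Illustrative halo width (not from the original code)
enum { W = 2 };
// Forward direction: send the W rightmost interior cells to the right,
// receive W cells into the left halo.
MPI_Sendrecv(&myArray[localSize], W, MPI_INT, rightProc, 0,
             &myArray[0],         W, MPI_INT, leftProc,  0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Reverse direction: send the W leftmost interior cells to the left,
// receive W cells into the right halo.
MPI_Sendrecv(&myArray[W],           W, MPI_INT, leftProc,  0,
             &myArray[localSize+W], W, MPI_INT, rightProc, 0,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
With a width-W halo and a nearest-neighbour stencil, one such exchange can cover W iterations before the halos have to be refreshed.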
To conclude, your main function could be reduced to (without using a Cartesian topology):
int main(int argc, char *argv[]){
int i;
int localSize;
int numProc;
int myid;
int leftProc;
int rightProc;
int * myArray;
int fullDomainSize = 16;
MPI_Init(&argc, &argv);
MPI_Comm_size(MPI_COMM_WORLD, &numProc);
MPI_Comm_rank(MPI_COMM_WORLD, &myid);
// Compute neighbouring ranks
rightProc = myid + 1;
if (rightProc >= numProc) rightProc = MPI_PROC_NULL;
leftProc = myid - 1;
if (leftProc < 0) leftProc = MPI_PROC_NULL;
// Lets get each partition size.
localSize = WhichSize(myid, numProc, fullDomainSize);
// Allocate arrays.
myArray = (int*)malloc((localSize+ 2)*sizeof(int));
// Now we will fill the array with a dummy value 123 for the interior
// and 8 for the ghosts, just to differentiate.
//printf("--------------------------------------------------\n");
//printf("Filling node arrays usage with values... \n");
for (i = 1; i <= localSize; i++){
myArray[i] = 123;
}
// ghosts.
myArray[localSize+1] = 8;
myArray[0] = 8;
//printf("-------------------------------\n");
//printf("Communicating Boundary ghosts !\n");
//printf("-------------------------------\n");
//printf("Sending ghost value to the right\n");
MPI_Sendrecv(&myArray[localSize], 1, MPI_INT, rightProc, 12345,
             &myArray[0], 1, MPI_INT, leftProc, 12345,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
//printf("Sending ghost value to the left\n");
MPI_Sendrecv(&myArray[1], 1, MPI_INT, leftProc, 12345,
             &myArray[localSize+1], 1, MPI_INT, rightProc, 12345,
             MPI_COMM_WORLD, MPI_STATUS_IGNORE);
// Now I Want to see if the ghosts are in place !.
printf("[%d] The head ghost is: %d\n", myid, myArray[0]);
printf("[%d] The tail ghost is: %d\n", myid, myArray[localSize + 1]);
MPI_Finalize();
return 0;
}
Upvotes: 10