Reputation: 65
I'm writing a program in C++ with MPI library. There is a deadlock occurring only one nodes works! I am not using send or receive collective operations but only the two collective functions(MPI_Allreduce
and MPI_Bcast
).
If there is node waits other node to send something or receive I don't actually understand what cause this deadlock.
void ParaStochSimulator::first_reacsimulator() {
SimulateSingleRun();
}
double ParaStochSimulator::deterMinTau() {
//calcualte minimum tau for this process
l_nLocalMinTau = calc_tau(); //min tau for each node
MPI_Allreduce(&l_nLocalMinTau, &l_nGlobalMinTau, 1, MPI_DOUBLE, MPI_MIN, MPI_COMM_WORLD);
//min tau for all nodes
//check if I have the min value
if (l_nLocalMinTau <= l_nGlobalMinTau && m_nCurrentTime < m_nOutputEndPoint) {
FireTransition(m_nMinTransPos);
CalculateAllHazardValues();
}
return l_nGlobalMinTau;
}
void ParaStochSimulator::SimulateSingleRun() {
//prepare a run
PrepareRun();
while ((m_nCurrentTime < m_nOutputEndPoint) && IsSimulationRunning()) {
deterMinTau();
if (mnprocess_id == 0) { //master
SimulateSingleStep();
std::cout << "current time:*****" << m_nCurrentTime << std::endl;
broad_casting(m_nMinTransPos);
MPI_Bcast(&l_anMarking, l_nMinplacesPos.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
//std::cout << "size of mani place :" << l_nMinplacesPos.size() << std::endl;
}
}
MPI_Bcast(&l_anMarking, l_nMinplacesPos.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
PostProcessRun();
}
Upvotes: 0
Views: 229
Reputation: 1649
As your "master" process is executing A MPI_Bcast, all the other are still running your loop, then entering deterMinTau, then executing MPI_Allreduce.
This is a deadlock because you master node is waiting for all the nodes to execute a Brodcast and all the other nodes are waiting for the master node to execute a Reduce.
I believe what you are looking for is :
void ParaStochSimulator::SimulateSingleRun() {
//prepare a run
PrepareRun();
while ((m_nCurrentTime < m_nOutputEndPoint) && IsSimulationRunning()) {
//All the nodes reduce tau at the same time
deterMinTau();
if (mnprocess_id == 0) { //master
SimulateSingleStep();
std::cout << "current time:*****" << m_nCurrentTime << std::endl;
broad_casting(m_nMinTransPos);
//Removed bordcast for master here
}
//All the nodes broadcast at every loop iteration
MPI_Bcast(&l_anMarking, l_nMinplacesPos.size(), MPI_DOUBLE, 0, MPI_COMM_WORLD);
}
PostProcessRun();
}
Upvotes: 1