--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/03/27 19:21:42	422
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/03/27 23:33:40	432
@@ -1,5 +1,5 @@
 #ifdef IS_MPI
-
+#include 
 #include 
 #include 
 #include 
@@ -133,28 +133,11 @@ int* mpiSimulation::divideLabor( void ){
 
       // How many atoms does this processor have?
       old_atoms = AtomsPerProc[which_proc];
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
+      new_atoms = old_atoms + add_atoms;
 
       // If the processor already had too many atoms, just skip this
      // processor and try again.
-
-      if (old_atoms >= nTarget) continue;
-
-      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
-      new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          AtomToProcMap[atomIndex] = which_proc;
-          atomIndex++;
-        }
-        done = 1;
-        continue;
-      }
 
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
@@ -179,6 +162,23 @@ int* mpiSimulation::divideLabor( void ){
         continue;
       }
 
+      if (old_atoms >= nTarget) continue;
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
+
+
       // The only situation left is where old_atoms < nTarget, but
       // new_atoms > nTarget. We want to accept this with some
       // probability that dies off the farther we are from nTarget
@@ -234,6 +234,8 @@ int* mpiSimulation::divideLabor( void ){
 
 
   MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, MPI_INT, 0);
+
+
 }
 
 
@@ -253,6 +255,8 @@ int* mpiSimulation::divideLabor( void ){
     }
   }
 
+  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
+
   MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
   MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
 
@@ -289,7 +293,7 @@ int* mpiSimulation::divideLabor( void ){
       local_index++;
    }
  }
- 
+
  return globalIndex;
 }
 
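
The reordered hunks above move the greedy acceptance test (new_atoms <= nTarget) after the give-up branch, and the comments describe a final fallback: accepting an overshoot "with some probability that dies off the farther we are from nTarget". That acceptance step itself lies outside these hunks, so the following is a minimal standalone sketch of the idea only, assuming an exponential decay law; the function name acceptOvershoot, the decay constant, and the use of std::rand() are illustrative choices, not taken from the OOPSE source.

#include <cmath>    // std::exp
#include <cstdlib>  // std::rand, RAND_MAX
#include <iostream>

// Hypothetical soft-cap test: a processor that stays at or below the
// per-processor atom target always accepts; one that overshoots accepts
// with a probability that decays as the overshoot grows.
bool acceptOvershoot(int newAtoms, int nTarget) {
  if (newAtoms <= nTarget) return true;            // at or under target: always accept
  double overshoot = double(newAtoms - nTarget);   // distance above the target
  double p = std::exp(-overshoot / nTarget);       // acceptance dies off with the overshoot
  double roll = double(std::rand()) / RAND_MAX;    // uniform draw in [0, 1]
  return roll < p;
}

int main() {
  int nTarget = 250;  // e.g. 1000 atoms spread over 4 processors
  // A small overshoot is usually accepted, a large one rarely is.
  std::cout << "overshoot to 260 accepted? " << acceptOvershoot(260, nTarget) << "\n";
  std::cout << "overshoot to 600 accepted? " << acceptOvershoot(600, nTarget) << "\n";
  return 0;
}

In divideLabor() this kind of test sits inside the while (!done) retry loop shown above, so a rejected processor simply sends the molecule back for another random draw of which_proc.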