--- trunk/src/brains/SimCreator.cpp	2012/08/31 21:16:10	1793
+++ trunk/src/brains/SimCreator.cpp	2012/10/01 18:21:15	1801
@@ -506,7 +506,7 @@ namespace OpenMD {
     int nGlobalMols = info->getNGlobalMolecules();
     std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
     
-    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
+    nProcessors = MPI::COMM_WORLD.Get_size();
     
     if (nProcessors > nGlobalMols) {
       sprintf(painCave.errMsg,
@@ -544,6 +544,7 @@ namespace OpenMD {
       nTarget = (int)(precast + 0.5);
       
       for(i = 0; i < nGlobalMols; i++) {
+
         done = 0;
         loops = 0;
         
@@ -568,13 +569,15 @@ namespace OpenMD {
           // and be done with it. 
           
           if (loops > 100) {
+
             sprintf(painCave.errMsg,
-                    "I've tried 100 times to assign molecule %d to a "
-                    " processor, but can't find a good spot.\n"
-                    "I'm assigning it at random to processor %d.\n",
-                    i, which_proc);
-
+                    "There have been 100 attempts to assign molecule %d to an\n"
+                    "\tunderworked processor, but there's no good place to\n"
+                    "\tleave it. OpenMD is assigning it at random to processor %d.\n",
+                    i, which_proc);
+
             painCave.isFatal = 0;
+            painCave.severity = OPENMD_INFO;
             simError();
             
             molToProcMap[i] = which_proc;
@@ -619,15 +622,14 @@ namespace OpenMD {
       }
       
       delete myRandom;
-      
+
       // Spray out this nonsense to all other processors:
-      
-      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
+      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
     } else {
       
       // Listen to your marching orders from processor 0:
-      
-      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
+      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
+
     }
     
     info->setMolToProcMap(molToProcMap);
@@ -851,8 +853,9 @@ namespace OpenMD {
     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
     // docs said we could.
     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
-    MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
-                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
+                              &tmpGroupMembership[0], nGlobalAtoms,
+                              MPI::INT, MPI::SUM);
     info->setGlobalGroupMembership(tmpGroupMembership);
 #else
     info->setGlobalGroupMembership(globalGroupMembership);
@@ -869,9 +872,9 @@ namespace OpenMD {
     
 #ifdef IS_MPI
     std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
-
-    MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
-                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
+                              nGlobalAtoms,
+                              MPI::INT, MPI::SUM);
     
     info->setGlobalMolMembership(tmpMolMembership);
 #else
@@ -888,8 +891,8 @@ namespace OpenMD {
     
 #ifdef IS_MPI
     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
-    MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
-                  info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
+                              info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 #else
     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 #endif
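
The comment kept as context in the hunk at old line 851 ("This would be prettier if we could use MPI_IN_PLACE like the MPI-2 docs said we could") names the pattern this changeset still works around: each Allreduce sums into a temporary vector, which is then handed back through a setter. As a hedged illustration only, not part of this changeset, here is a minimal sketch of how that reduction collapses on an MPI-2 implementation whose in-place reductions actually work, using the same C++ bindings the changeset adopts; the buffer size and contents below are placeholders standing in for the real nGlobalAtoms-sized membership arrays:

    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);

      // Placeholder size; in SimCreator.cpp this would be nGlobalAtoms.
      int nGlobalAtoms = 8;

      // Each rank zero-fills the vector, then writes only the entries it owns.
      std::vector<int> membership(nGlobalAtoms, 0);

      // MPI::IN_PLACE is the C++-binding spelling of MPI_IN_PLACE: the sum is
      // formed directly in `membership`, so no temporary receive vector (the
      // tmpGroupMembership of the code above) is needed.
      MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &membership[0], nGlobalAtoms,
                                MPI::INT, MPI::SUM);

      MPI::Finalize();
      return 0;
    }

The trade-off mirrored in the changeset is buffer lifetime rather than speed: the send-to-temporary form costs one extra vector per reduction but avoids relying on MPI_IN_PLACE support in the MPI-2 implementations of the day.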