--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/03/26 19:34:49	406
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2004/05/27 00:48:12	1198
@@ -1,16 +1,15 @@
 #ifdef IS_MPI
-
-#include <cstdlib>
-#include <cstring>
+#include <iostream>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
 #include <mpi.h>
-#include <mpi++.h>
 
 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
-
 
 mpiSimulation* mpiSim;
 
 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
@@ -18,12 +17,11 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 {
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;
 
-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
   mpiPlug->myNode = worldRank;
 
   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];
 
   mpiSim = this;
@@ -33,18 +31,20 @@ mpiSimulation::~mpiSimulation(){
 
 mpiSimulation::~mpiSimulation(){
 
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
   // perhaps we should let fortran know the party is over.
 
 }
 
-int* mpiSimulation::divideLabor( void ){
+void mpiSimulation::divideLabor( ){
 
-  int* globalIndex;
-
   int nComponents;
   MoleculeStamp** compStamps;
-  randomSPRNG myRandom;
+  randomSPRNG *myRandom;
   int* componentsNmol;
   int* AtomsPerProc;
@@ -55,18 +55,17 @@ int* mpiSimulation::divideLabor( void ){
   int old_atoms, add_atoms, new_atoms;
 
   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex;
   int done;
-  int nLocal, molLocal;
-  int i, index;
-  int smallDiff, bigDiff;
+  int i, j, loops, which_proc, nmol_local, natoms_local;
+  int nmol_global, natoms_global;
+  int local_index;
+  int baseSeed = entryPlug->getSeed();
-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
-  AtomsPerProc = new int[mpiPlug->numberProcessors];
+  AtomsPerProc = new int[mpiPlug->nProcessors];
 
   mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
   mpiPlug->nBondsGlobal = entryPlug->n_bonds;
@@ -74,13 +73,14 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
+  mpiPlug->nGroupsGlobal = entryPlug->ngroup;
 
-  myRandom = new randomSPRNG();
+  myRandom = new randomSPRNG( baseSeed );
 
-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
   // Initialize things that we'll send out later:
-  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
+  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
     AtomsPerProc[i] = 0;
   }
   for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
@@ -95,7 +95,7 @@ int* mpiSimulation::divideLabor( void ){
 
   if (mpiPlug->myNode == 0) {
     numerator = (double) entryPlug->n_atoms;
-    denominator = (double) mpiPlug->numberProcessors;
+    denominator = (double) mpiPlug->nProcessors;
     precast = numerator / denominator;
     nTarget = (int)( precast + 0.5 );
@@ -120,33 +120,13 @@ int* mpiSimulation::divideLabor( void ){
 
       // Pick a processor at random
 
-      which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);
 
       // How many atoms does this processor have?
 
      old_atoms = AtomsPerProc[which_proc];
-
-      // If the processor already had too many atoms, just skip this
-      // processor and try again.
-
-      if (old_atoms >= nTarget) continue;
-
-      add_atoms = compStamps[MolComponentType[i]]->getNatoms();
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
-        done = 1;
-        continue;
-      }
 
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
@@ -164,31 +144,46 @@ int* mpiSimulation::divideLabor( void ){
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
        }
        done = 1;
        continue;
      }
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
 
-      // The only situation left is where old_atoms < nTarget, but
-      // new_atoms > nTarget.  We want to accept this with some
-      // probability that dies off the farther we are from nTarget
+      // The only situation left is when new_atoms > nTarget.  We
+      // want to accept this with some probability that dies off the
+      // farther we are from nTarget
+
      // roughly:  x = new_atoms - nTarget
      //           Pacc(x) = exp(- a * x)
-      // where a = 1 / (average atoms per molecule)
+      // where a = penalty / (average atoms per molecule)
 
      x = (double) (new_atoms - nTarget);
-      y = myRandom.getRandom();
-
-      if (exp(- a * x) > y) {
+      y = myRandom->getRandom();
+
+      if (y < exp(- a * x)) {
        MolToProcMap[i] = which_proc;
        AtomsPerProc[which_proc] += add_atoms;
        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
        done = 1;
        continue;
      } else {
@@ -200,32 +195,34 @@ int* mpiSimulation::divideLabor( void ){
 
    // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
  }
  else {
 
    // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+
  }
@@ -245,8 +242,10 @@ int* mpiSimulation::divideLabor( void ){
    }
  }
 
-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);
 
  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
@@ -270,38 +269,51 @@ int* mpiSimulation::divideLabor( void ){
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();
 
-  mpiPlug->myNMol = nmol_local;
-  mpiPlug->myNlocal = natoms_local;
+  mpiPlug->nMolLocal = nmol_local;
+  mpiPlug->nAtomsLocal = natoms_local;
 
-  globalIndex = new int[mpiPlug->myNlocal];
+  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
+  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
 
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
+      globalAtomIndex[local_index] = i;
+
+      globalToLocalAtom[i] = local_index;
      local_index++;
-      globalIndex[local_index] = i;
+    }
+    else
+      globalToLocalAtom[i] = -1;
  }
-
-
-
-  index = mpiPlug->myAtomStart;
-//   for( i=0; i<mpiPlug->myNlocal; i++){
-//     globalIndex[i] = index;
-//     index++;
-//   }
-
-//   return globalIndex;
+  globalMolIndex.resize(mpiPlug->nMolLocal);
+  globalToLocalMol.resize(mpiPlug->nMolGlobal);
+
+  local_index = 0;
+  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
+    if (MolToProcMap[i] == mpiPlug->myNode) {
+      globalMolIndex[local_index] = i;
+      globalToLocalMol[i] = local_index;
+      local_index++;
+    }
+    else
+      globalToLocalMol[i] = -1;
+  }
+
 }
 
 
 void mpiSimulation::mpiRefresh( void ){
 
  int isError, i;
-  int *globalIndex = new int[mpiPlug->myNlocal];
+  int *globalIndex = new int[mpiPlug->nAtomsLocal];
 
-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
+  // not barf
+
+  for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+
  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
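
The heart of this revision is the randomized molecule-to-processor assignment. Processor 0 deals each molecule to a random processor; an assignment that would push a processor past its target atom count is accepted with probability Pacc(x) = exp(-a * x), where x is the overshoot in atoms and a = penalty / (average atoms per molecule), the penalty being raised from 1 to 3 here so that overloads are rejected more sharply. The swap of atomIndex++ relative to the AtomToProcMap[] store also fixes an off-by-one that skipped slot 0 and wrote one element past the end of the map, and the flipped comparison (y < exp(-a*x) instead of exp(-a*x) > y) is equivalent. Below is a minimal standalone sketch of the same acceptance rule; the names Molecule and assignMolecules are illustrative only, and plain std::rand() stands in for the randomSPRNG generator used by the real code.

#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Hypothetical stand-in for the per-component molecule stamps in OOPSE.
struct Molecule { int nAtoms; };

// Deal each molecule to a random processor; overshooting the per-processor
// atom target is accepted with probability Pacc(x) = exp(-a * x).
std::vector<int> assignMolecules(const std::vector<Molecule>& mols,
                                 int nProcessors, int nAtomsGlobal) {
  std::vector<int> molToProc(mols.size(), -1);
  std::vector<int> atomsPerProc(nProcessors, 0);

  // Ideal atom count per processor, rounded to the nearest integer.
  int nTarget = (int)((double)nAtomsGlobal / (double)nProcessors + 0.5);

  // Same penalty form as the revised code: a = 3 * nMol / nAtoms,
  // i.e. a penalty of 3 divided by the average atoms per molecule.
  double a = 3.0 * (double)mols.size() / (double)nAtomsGlobal;

  for (std::size_t i = 0; i < mols.size(); i++) {
    int loops = 0;
    while (molToProc[i] < 0) {
      loops++;

      // Pick a processor at random.
      int which = (int)((double)std::rand() / RAND_MAX * nProcessors)
                  % nProcessors;
      int newAtoms = atomsPerProc[which] + mols[i].nAtoms;

      // Force the assignment after too many rejections, accept outright
      // if the processor stays at or under target, and otherwise accept
      // with probability exp(-a * overshoot).
      double x = (double)(newAtoms - nTarget);
      double y = (double)std::rand() / RAND_MAX;

      if (loops > 100 || newAtoms <= nTarget || y < std::exp(-a * x)) {
        molToProc[i] = which;
        atomsPerProc[which] += mols[i].nAtoms;
      }
    }
  }
  return molToProc;
}

The give-up branch after 100 rejections guarantees termination even when every processor is already at or above target, which is why the real code checks it before the other two conditions.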
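The communication calls migrate from the C++ MPI bindings (MPI::COMM_WORLD.Bcast, since removed from the MPI standard) to the portable C API, which takes the communicator as its final argument. Note that the old calls passed &MolToProcMap, the address of the pointer, rather than the array itself; the new calls drop the '&' as well. A minimal sketch of the same broadcast pattern follows; the round-robin layout and the program around it are illustrative, not OOPSE code.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  int rank, nProcessors;
  const int nMolGlobal = 8;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);

  std::vector<int> molToProcMap(nMolGlobal);

  if (rank == 0) {
    // Processor 0 decides the layout (round-robin here for brevity).
    for (int i = 0; i < nMolGlobal; i++)
      molToProcMap[i] = i % nProcessors;
  }

  // The C API takes the buffer itself, not its address: broadcasting
  // &molToProcMap would ship the pointer value, which is why the diff
  // drops the '&' when converting these calls.
  MPI_Bcast(&molToProcMap[0], nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);

  std::printf("rank %d: molecule 0 lives on processor %d\n",
              rank, molToProcMap[0]);

  MPI_Finalize();
  return 0;
}

Compiled with mpicxx and launched under mpirun, every rank prints the same owner for molecule 0 once the broadcast completes.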
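Finally, divideLabor() no longer returns a bare globalIndex array; it builds two-way index maps on every processor: a compact list of the global IDs owned locally (globalAtomIndex, globalMolIndex) plus a reverse map holding -1 for entries owned elsewhere (globalToLocalAtom, globalToLocalMol), so lookups are O(1) in both directions. A sketch of that pattern, with a hypothetical buildIndexMaps() helper:

#include <cstddef>
#include <vector>

// Build the forward (local -> global) and reverse (global -> local) maps
// from a global ownership array; -1 marks atoms owned by other processors.
void buildIndexMaps(const std::vector<int>& atomToProcMap, int myNode,
                    std::vector<int>& globalAtomIndex,
                    std::vector<int>& globalToLocalAtom) {
  globalToLocalAtom.assign(atomToProcMap.size(), -1);
  globalAtomIndex.clear();
  for (std::size_t i = 0; i < atomToProcMap.size(); i++) {
    if (atomToProcMap[i] == myNode) {
      // Local indices are assigned in ascending global order, exactly as
      // the loop in the revised divideLabor() does.
      globalToLocalAtom[i] = (int)globalAtomIndex.size();
      globalAtomIndex.push_back((int)i);
    }
  }
}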