--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/04/03 20:21:54	447
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2004/05/27 00:48:12	1198
@@ -1,8 +1,8 @@
 #ifdef IS_MPI
 #include
-#include
-#include
-#include
+#include
+#include
+#include
 #include
 #include "mpiSimulation.hpp"
@@ -10,8 +10,6 @@
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
 
-#define BASE_SEED 123456789
-
 mpiSimulation* mpiSim;
 
 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
@@ -19,7 +17,7 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;
 
-  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
   mpiPlug->myNode = worldRank;
 
   MolToProcMap = new int[entryPlug->n_mol];
@@ -42,10 +40,8 @@ mpiSimulation::~mpiSimulation(){
 }
 
-int* mpiSimulation::divideLabor( void ){
+void mpiSimulation::divideLabor( ){
 
-  int* globalIndex;
-
   int nComponents;
   MoleculeStamp** compStamps;
   randomSPRNG *myRandom;
@@ -59,21 +55,17 @@ int* mpiSimulation::divideLabor( void ){
   int old_atoms, add_atoms, new_atoms;
   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex;
   int done;
-  int nLocal, molLocal;
   int i, j, loops, which_proc, nmol_local, natoms_local;
   int nmol_global, natoms_global;
-  int local_index, index;
-  int smallDiff, bigDiff;
-  int baseSeed = BASE_SEED;
+  int local_index;
+  int baseSeed = entryPlug->getSeed();
 
-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
-  AtomsPerProc = new int[mpiPlug->numberProcessors];
+  AtomsPerProc = new int[mpiPlug->nProcessors];
 
   mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
   mpiPlug->nBondsGlobal = entryPlug->n_bonds;
@@ -81,13 +73,14 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
+  mpiPlug->nGroupsGlobal = entryPlug->ngroup;
 
   myRandom = new randomSPRNG( baseSeed );
 
   a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
   // Initialize things that we'll send out later:
-  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
+  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
     AtomsPerProc[i] = 0;
   }
   for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
@@ -102,7 +95,7 @@ int* mpiSimulation::divideLabor( void ){
   if (mpiPlug->myNode == 0) {
     numerator = (double) entryPlug->n_atoms;
-    denominator = (double) mpiPlug->numberProcessors;
+    denominator = (double) mpiPlug->nProcessors;
     precast = numerator / denominator;
     nTarget = (int)( precast + 0.5 );
@@ -127,7 +120,7 @@ int* mpiSimulation::divideLabor( void ){
      // Pick a processor at random
 
-      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);
 
      // How many atoms does this processor have?
@@ -211,7 +204,7 @@ int* mpiSimulation::divideLabor( void ){
     MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
               MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
               MPI_INT, 0, MPI_COMM_WORLD);
   }
   else {
@@ -226,7 +219,7 @@ int* mpiSimulation::divideLabor( void ){
     MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
               MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
               MPI_INT, 0, MPI_COMM_WORLD);
 
@@ -276,31 +269,50 @@ int* mpiSimulation::divideLabor( void ){
            "Successfully divided the molecules among the processors.\n" );
   MPIcheckPoint();
 
-  mpiPlug->myNMol = nmol_local;
-  mpiPlug->myNlocal = natoms_local;
+  mpiPlug->nMolLocal = nmol_local;
+  mpiPlug->nAtomsLocal = natoms_local;
 
-  globalIndex = new int[mpiPlug->myNlocal];
+  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
+  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
   local_index = 0;
 
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      globalIndex[local_index] = i;
+      globalAtomIndex[local_index] = i;
+
+      globalToLocalAtom[i] = local_index;
       local_index++;
+    }
+    else
+      globalToLocalAtom[i] = -1;
   }
+
+  globalMolIndex.resize(mpiPlug->nMolLocal);
+  globalToLocalMol.resize(mpiPlug->nMolGlobal);
 
-  return globalIndex;
+  local_index = 0;
+  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
+    if (MolToProcMap[i] == mpiPlug->myNode) {
+      globalMolIndex[local_index] = i;
+      globalToLocalMol[i] = local_index;
+      local_index++;
+    }
+    else
+      globalToLocalMol[i] = -1;
+  }
+
 }
 
 void mpiSimulation::mpiRefresh( void ){
 
   int isError, i;
-  int *globalIndex = new int[mpiPlug->myNlocal];
+  int *globalIndex = new int[mpiPlug->nAtomsLocal];
 
   // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
   // not barf
 
-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+  for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
 
   isError = 0;
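
Two parts of this diff are worth unpacking with small, self-contained examples. The first is the load-balancing setup in divideLabor(): node 0 computes a per-processor atom target (nTarget) by rounding n_atoms / nProcessors, and candidate processors are then drawn from a uniform random stream (which_proc). The acceptance test applied to each candidate lies outside the hunks shown, so the sketch below covers only those two visible steps. It is illustrative rather than OOPSE code: std::mt19937 and std::uniform_real_distribution stand in for the randomSPRNG wrapper, and the nAtomsGlobal / nProcessors values are made up.

#include <iostream>
#include <random>

int main() {
  // Toy values; in OOPSE these come from entryPlug->n_atoms and MPI_Comm_size.
  int nAtomsGlobal = 1000;
  int nProcessors  = 7;

  // nTarget = round(n_atoms / nProcessors), as in the @@ -102,7 +95,7 @@ hunk.
  double numerator   = (double) nAtomsGlobal;
  double denominator = (double) nProcessors;
  double precast     = numerator / denominator;
  int    nTarget     = (int)( precast + 0.5 );

  // Pick a processor at random from a uniform [0,1) stream, mirroring
  // which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors).
  // std::mt19937 stands in for the randomSPRNG wrapper; any seed works here.
  std::mt19937 gen(123456789);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  int which_proc = (int)( uniform(gen) * nProcessors );

  std::cout << "target atoms per processor: " << nTarget << "\n"
            << "candidate processor: "        << which_proc << "\n";
  return 0;
}

The removed BASE_SEED define was the old hard-coded seed for that random stream; in the new revision the seed comes from entryPlug->getSeed() instead.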
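The second is the replacement of divideLabor()'s returned globalIndex array with persistent lookup tables: globalAtomIndex and globalMolIndex map local indices to global ones, while globalToLocalAtom and globalToLocalMol invert the mapping, storing -1 for atoms or molecules owned by another node. The standalone sketch below shows the same two-way indexing scheme; the function name buildIndexMaps and its atomToProc / myNode parameters are illustrative stand-ins, and it grows the local list with push_back instead of presizing it as the diff does.

#include <iostream>
#include <vector>

// Illustrative stand-in (not OOPSE code) for the index maps divideLabor now
// fills: for every global index owned by this node, globalAtomIndex[local]
// == global and globalToLocalAtom[global] == local; everything else gets -1.
void buildIndexMaps(const std::vector<int>& atomToProc, int myNode,
                    std::vector<int>& globalAtomIndex,
                    std::vector<int>& globalToLocalAtom) {
  globalAtomIndex.clear();
  globalToLocalAtom.assign(atomToProc.size(), -1);

  for (int i = 0; i < (int) atomToProc.size(); i++) {
    if (atomToProc[i] == myNode) {
      globalToLocalAtom[i] = (int) globalAtomIndex.size();  // next local slot
      globalAtomIndex.push_back(i);                         // local -> global
    }
  }
}

int main() {
  // Toy decomposition: 6 atoms split between 2 processors.
  std::vector<int> atomToProc = { 0, 1, 0, 0, 1, 1 };
  std::vector<int> globalAtomIndex, globalToLocalAtom;

  buildIndexMaps(atomToProc, 0, globalAtomIndex, globalToLocalAtom);

  for (int local = 0; local < (int) globalAtomIndex.size(); local++)
    std::cout << "local " << local
              << " -> global " << globalAtomIndex[local] << "\n";
  return 0;
}

Keeping both directions around lets later code test ownership of a global index and translate it into a local slot in constant time, which is what the new else branches that write -1 provide.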