--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2004/05/27 18:59:17	1203
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2004/06/01 21:45:22	1217
@@ -236,6 +236,10 @@ void mpiSimulation::divideLabor( ){
 
   // Spray out this nonsense to all other processors:
 
+  //std::cerr << "node 0 mol2proc = \n";
+  //for (i = 0; i < parallelData->nMolGlobal; i++)
+  //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";
+
   MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
 	    MPI_INT, 0, MPI_COMM_WORLD);
 
@@ -394,15 +398,25 @@ void mpiSimulation::mpiRefresh( void ){
 void mpiSimulation::mpiRefresh( void ){
 
   int isError, i;
-  int *globalAtomIndex = new int[parallelData->nAtomsLocal];
-
-  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
-  // not barf
+  int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
+  int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];
 
-  for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+  // Fortran indexing needs to be increased by 1 in order to get the 2
+  // languages to not barf
+
+  for(i = 0; i < parallelData->nAtomsLocal; i++)
+    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;
+
+  for(i = 0; i < parallelData->nGroupsLocal; i++)
+    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;
 
   isError = 0;
-  setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
+
+  setFsimParallel( parallelData,
+                   &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
+                   &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
+                   &isError );
+
  if( isError ){
 
     sprintf( painCave.errMsg,
@@ -411,7 +425,8 @@ void mpiSimulation::mpiRefresh( void ){
     simError();
   }
 
-  delete[] globalAtomIndex;
+  delete[] localToGlobalGroupIndex;
+  delete[] localToGlobalAtomIndex;
 
   sprintf( checkPointMsg,
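
Note on the change above: mpiRefresh() now builds two arrays, localToGlobalAtomIndex and localToGlobalGroupIndex, each entry shifted by +1 before being handed to the Fortran side via setFsimParallel, because Fortran arrays are 1-based. The following is a minimal, self-contained sketch of just that index-translation step, under stated assumptions: ParallelInfo, toFortranIndexing, and the sample index values are illustrative stand-ins, not OOPSE declarations; the real globalAtomIndex, globalGroupIndex, and setFsimParallel come from code not shown in this diff.

#include <cstddef>
#include <vector>

// Stand-in for the local atom/group counters kept in parallelData (assumed shape).
struct ParallelInfo {
  int nAtomsLocal;
  int nGroupsLocal;
};

// Build a 1-based copy of a 0-based local-to-global index map,
// mirroring the "+ 1" shift done before calling setFsimParallel.
std::vector<int> toFortranIndexing(const std::vector<int>& cIndices) {
  std::vector<int> fortranIndices(cIndices.size());
  for (std::size_t i = 0; i < cIndices.size(); ++i)
    fortranIndices[i] = cIndices[i] + 1;   // Fortran arrays start at 1
  return fortranIndices;
}

int main() {
  ParallelInfo parallelData{3, 2};

  // Hypothetical global indices owned by this processor.
  std::vector<int> globalAtomIndex  = {0, 5, 9};
  std::vector<int> globalGroupIndex = {2, 7};

  std::vector<int> localToGlobalAtomIndex  = toFortranIndexing(globalAtomIndex);
  std::vector<int> localToGlobalGroupIndex = toFortranIndexing(globalGroupIndex);

  // localToGlobalAtomIndex  == {1, 6, 10}
  // localToGlobalGroupIndex == {3, 8}
  (void)parallelData;
  return 0;
}

In the actual diff the arrays are raw new[]/delete[] buffers sized by parallelData->nAtomsLocal and parallelData->nGroupsLocal; the sketch uses std::vector only to stay self-contained.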