--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/03/26 23:14:02	416
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/03/27 19:21:42	422
@@ -25,7 +25,6 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 
   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-  AtomToProcMap = new int[entryPlug->n_atoms];
 
   mpiSim = this;
 
@@ -35,6 +34,10 @@ mpiSimulation::~mpiSimulation(){
 
 mpiSimulation::~mpiSimulation(){
 
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
 
   // perhaps we should let fortran know the party is over.
@@ -146,8 +149,8 @@ int* mpiSimulation::divideLabor( void ){
       MolToProcMap[i] = which_proc;
       AtomsPerProc[which_proc] += add_atoms;
       for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
       }
       done = 1;
       continue;
@@ -169,8 +172,8 @@ int* mpiSimulation::divideLabor( void ){
       MolToProcMap[i] = which_proc;
       AtomsPerProc[which_proc] += add_atoms;
       for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
       }
       done = 1;
       continue;
@@ -191,9 +194,9 @@ int* mpiSimulation::divideLabor( void ){
       MolToProcMap[i] = which_proc;
       AtomsPerProc[which_proc] += add_atoms;
       for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
-      }
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
+      }
       done = 1;
       continue;
     } else {
@@ -205,31 +208,31 @@ int* mpiSimulation::divideLabor( void ){
 
     // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
   } else {
 
     // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
 
   }
@@ -282,8 +285,8 @@ int* mpiSimulation::divideLabor( void ){
 
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      local_index++;
       globalIndex[local_index] = i;
+      local_index++;
     }
   }