--- trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/04/01 16:50:14	441
+++ trunk/OOPSE/libmdtools/mpiSimulation.cpp	2003/04/03 20:21:54	447
@@ -4,7 +4,6 @@
 #include
 #include
 #include <mpi.h>
-#include <mpi++.h>
 
 #include "mpiSimulation.hpp"
 #include "simError.h"
@@ -20,7 +19,7 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;
 
-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
   mpiPlug->myNode = worldRank;
 
   MolToProcMap = new int[entryPlug->n_mol];
@@ -203,32 +202,32 @@ int* mpiSimulation::divideLabor( void ){
 
    // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
   }
   else {
 
    // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
   }
@@ -250,8 +249,10 @@ int* mpiSimulation::divideLabor( void ){
     }
   }
 
-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);
 
   if( nmol_global != entryPlug->n_mol ){
     sprintf( painCave.errMsg,
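
For reference, a minimal, self-contained sketch of the C-binding calls this
revision switches to. It is not part of the patch: the variable names
(nprocs, rank, nMolGlobal, nmol_local) and the toy values are illustrative
only, and nothing is assumed beyond an MPI implementation providing <mpi.h>.

// sketch.cpp -- compile and run with: mpicxx sketch.cpp && mpirun -np 4 ./a.out
#include <mpi.h>
#include <cstdio>

int main( int argc, char** argv ){

  MPI_Init(&argc, &argv);

  int nprocs, rank;

  // Replaces MPI::COMM_WORLD.Get_size(): the size comes back through
  // an out-parameter instead of a return value.
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Replaces MPI::COMM_WORLD.Bcast(buf, count, MPI_INT, 0): the
  // communicator moves to a trailing argument.
  int nMolGlobal = (rank == 0) ? 42 : 0;
  MPI_Bcast(&nMolGlobal, 1, MPI_INT, 0, MPI_COMM_WORLD);

  // Replaces MPI::COMM_WORLD.Allreduce(...): sums one int per rank,
  // the same kind of sanity check divideLabor() performs on its
  // global molecule and atom counts.
  int nmol_local = 1, nmol_global = 0;
  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  if( rank == 0 )
    printf("procs: %d  broadcast: %d  reduced: %d\n",
           nprocs, nMolGlobal, nmol_global);

  MPI_Finalize();
  return 0;
}

The C bindings take the communicator as an explicit final argument and
report errors through integer return codes, which is why every converted
call in the patch gains an MPI_COMM_WORLD parameter. The MPI C++ bindings
used before this change were later deprecated in MPI-2.2 and removed in
MPI-3.0, so conversions like this one aged well.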