--- trunk/src/brains/SimCreator.cpp	2013/06/17 18:28:30	1880
+++ trunk/src/brains/SimCreator.cpp	2014/02/26 14:14:50	1969
@@ -46,6 +46,12 @@
  * @date 11/03/2004
  * @version 1.0
  */
+
+#ifdef IS_MPI
+#include "mpi.h"
+#include "math/ParallelRandNumGen.hpp"
+#endif
+
 #include
 #include
 #include
@@ -83,10 +89,6 @@
 #include "types/FixedChargeAdapter.hpp"
 #include "types/FluctuatingChargeAdapter.hpp"
 
-#ifdef IS_MPI
-#include "mpi.h"
-#include "math/ParallelRandNumGen.hpp"
-#endif
 
 
 namespace OpenMD {
@@ -101,29 +103,39 @@ namespace OpenMD {
     const int masterNode = 0;
 
     if (worldRank == masterNode) {
-      MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
+      MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 #endif
       SimplePreprocessor preprocessor;
-      preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
+      preprocessor.preprocess(rawMetaDataStream, filename,
+                              startOfMetaDataBlock, ppStream);
 
 #ifdef IS_MPI
-      //brocasting the stream size
+      //broadcasting the stream size
       streamSize = ppStream.str().size() +1;
-      MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
-      MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
+      MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
+      MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
+                streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
+
+      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
+      // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
+      //                       streamSize, MPI::CHAR, masterNode);
 
     } else {
-      MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
+      MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 
-      //get stream size
-      MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
+      //get stream size
+      MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
+
       char* buf = new char[streamSize];
       assert(buf);
 
       //receive file content
-      MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
-
+      MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
+
       ppStream.str(buf);
       delete [] buf;
     }
@@ -147,7 +159,6 @@ namespace OpenMD {
     parser.initializeASTFactory(factory);
     parser.setASTFactory(&factory);
     parser.mdfile();
-
     // Create a tree parser that reads information into Globals
     MDTreeParser treeParser;
     treeParser.initializeASTFactory(factory);
@@ -518,7 +529,8 @@ namespace OpenMD {
     // error
     // condition:
 
-    nProcessors = MPI::COMM_WORLD.Get_size();
+    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
+    //nProcessors = MPI::COMM_WORLD.Get_size();
 
     if (nProcessors > nGlobalMols) {
       sprintf(painCave.errMsg,
@@ -637,11 +649,13 @@ namespace OpenMD {
       delete myRandom;
 
       // Spray out this nonsense to all other processors:
-      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
+      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
     } else {
 
       // Listen to your marching orders from processor 0:
-      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
+      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
+      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
     }
 
@@ -801,22 +815,38 @@ namespace OpenMD {
     Molecule::AtomIterator ai;
     Molecule::RigidBodyIterator ri;
     Molecule::CutoffGroupIterator ci;
+    Molecule::BondIterator boi;
+    Molecule::BendIterator bei;
+    Molecule::TorsionIterator ti;
+    Molecule::InversionIterator ii;
     Molecule::IntegrableObjectIterator ioi;
-    Molecule * mol;
-    Atom * atom;
-    RigidBody * rb;
-    CutoffGroup * cg;
+    Molecule* mol;
+    Atom* atom;
+    RigidBody* rb;
+    CutoffGroup* cg;
+    Bond* bond;
+    Bend* bend;
+    Torsion* torsion;
+    Inversion* inversion;
     int beginAtomIndex;
     int beginRigidBodyIndex;
     int beginCutoffGroupIndex;
+    int beginBondIndex;
+    int beginBendIndex;
+    int beginTorsionIndex;
+    int beginInversionIndex;
     int nGlobalAtoms = info->getNGlobalAtoms();
     int nGlobalRigidBodies = info->getNGlobalRigidBodies();
 
     beginAtomIndex = 0;
-    //rigidbody's index begins right after atom's
+    // The rigid body indices begin immediately after the atom indices:
    beginRigidBodyIndex = info->getNGlobalAtoms();
     beginCutoffGroupIndex = 0;
-
+    beginBondIndex = 0;
+    beginBendIndex = 0;
+    beginTorsionIndex = 0;
+    beginInversionIndex = 0;
+
     for(int i = 0; i < info->getNGlobalMolecules(); i++) {
 
 #ifdef IS_MPI
@@ -825,8 +855,9 @@ namespace OpenMD {
         // stuff to do if I own this molecule
         mol = info->getMoleculeByGlobalIndex(i);
 
-        //local index(index in DataStorge) of atom is important
-        for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
+        // The local index(index in DataStorge) of the atom is important:
+        for(atom = mol->beginAtom(ai); atom != NULL;
+            atom = mol->nextAtom(ai)) {
           atom->setGlobalIndex(beginAtomIndex++);
         }
 
@@ -835,12 +866,28 @@ namespace OpenMD {
           rb->setGlobalIndex(beginRigidBodyIndex++);
         }
 
-        //local index of cutoff group is trivial, it only depends on
-        //the order of travesing
+        // The local index of other objects only depends on the order
+        // of traversal:
         for(cg = mol->beginCutoffGroup(ci); cg != NULL;
             cg = mol->nextCutoffGroup(ci)) {
           cg->setGlobalIndex(beginCutoffGroupIndex++);
         }
+        for(bond = mol->beginBond(boi); bond != NULL;
+            bond = mol->nextBond(boi)) {
+          bond->setGlobalIndex(beginBondIndex++);
+        }
+        for(bend = mol->beginBend(bei); bend != NULL;
+            bend = mol->nextBend(bei)) {
+          bend->setGlobalIndex(beginBendIndex++);
+        }
+        for(torsion = mol->beginTorsion(ti); torsion != NULL;
+            torsion = mol->nextTorsion(ti)) {
+          torsion->setGlobalIndex(beginTorsionIndex++);
+        }
+        for(inversion = mol->beginInversion(ii); inversion != NULL;
+            inversion = mol->nextInversion(ii)) {
+          inversion->setGlobalIndex(beginInversionIndex++);
+        }
 
 #ifdef IS_MPI
       } else {
@@ -853,6 +900,10 @@ namespace OpenMD {
         beginAtomIndex += stamp->getNAtoms();
         beginRigidBodyIndex += stamp->getNRigidBodies();
         beginCutoffGroupIndex += stamp->getNCutoffGroups() + stamp->getNFreeAtoms();
+        beginBondIndex += stamp->getNBonds();
+        beginBendIndex += stamp->getNBends();
+        beginTorsionIndex += stamp->getNTorsions();
+        beginInversionIndex += stamp->getNInversions();
       }
 #endif
 
@@ -860,9 +911,10 @@ namespace OpenMD {
     //fill globalGroupMembership
     std::vector<int> globalGroupMembership(info->getNGlobalAtoms(), 0);
-    for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
-      for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) {
-
+    for(mol = info->beginMolecule(mi); mol != NULL;
+        mol = info->nextMolecule(mi)) {
+      for (cg = mol->beginCutoffGroup(ci); cg != NULL;
+           cg = mol->nextCutoffGroup(ci)) {
         for(atom = cg->beginAtom(ai); atom != NULL; atom = cg->nextAtom(ai)) {
           globalGroupMembership[atom->getGlobalIndex()] = 
            cg->getGlobalIndex();
         }
@@ -877,9 +929,12 @@ namespace OpenMD {
     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
     // docs said we could.
     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
-    MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
-                              &tmpGroupMembership[0], nGlobalAtoms,
-                              MPI::INT, MPI::SUM);
+    MPI_Allreduce(&globalGroupMembership[0],
+                  &tmpGroupMembership[0], nGlobalAtoms,
+                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
+    //                           &tmpGroupMembership[0], nGlobalAtoms,
+    //                           MPI::INT, MPI::SUM);
     info->setGlobalGroupMembership(tmpGroupMembership);
 #else
     info->setGlobalGroupMembership(globalGroupMembership);
@@ -903,9 +958,12 @@ namespace OpenMD {
 #ifdef IS_MPI
     std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
                                       info->getNGlobalRigidBodies(), 0);
-    MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
-                              nGlobalAtoms + nGlobalRigidBodies,
-                              MPI::INT, MPI::SUM);
+    MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
+                  nGlobalAtoms + nGlobalRigidBodies,
+                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
+    //                           nGlobalAtoms + nGlobalRigidBodies,
+    //                           MPI::INT, MPI::SUM);
 
     info->setGlobalMolMembership(tmpMolMembership);
 #else
@@ -923,8 +981,10 @@ namespace OpenMD {
 
 #ifdef IS_MPI
     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
-    MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
-                              info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
+    MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
+                  info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
+    // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
+    //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 #else
     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 #endif
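
Note on the pattern above: every MPI change in this revision is the same mechanical migration from the C++ bindings (MPI::COMM_WORLD.Bcast, .Allreduce, .Get_size), which were deprecated in MPI-2.2 and removed in MPI-3.0, to the plain C API. The communicator becomes an explicit trailing argument, MPI:: datatype and operator constants become the MPI_INT / MPI_SUM style macros, and results come back through pointer out-parameters rather than return values. Below is a minimal, self-contained sketch of that convention, not part of the patch; the payload/membership names are illustrative only, not taken from SimCreator.cpp.

// Sketch of the MPI C-API calling convention used by this revision.
// Build and run with, e.g.:  mpic++ sketch.cpp && mpirun -np 2 ./a.out
#include <mpi.h>
#include <string>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int rank, nProcessors;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);         // was: MPI::COMM_WORLD.Get_rank()
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);  // was: MPI::COMM_WORLD.Get_size()

  // Shipping a string the way the preprocessed metadata stream is
  // broadcast above: rank 0 sends the size first, then the characters.
  std::string payload = (rank == 0) ? "md file contents" : "";
  long streamSize = static_cast<long>(payload.size()) + 1;
  MPI_Bcast(&streamSize, 1, MPI_LONG, 0, MPI_COMM_WORLD);

  std::vector<char> buf(streamSize, 0);  // zero-filled, so the NUL survives
  if (rank == 0) payload.copy(&buf[0], payload.size());
  MPI_Bcast(&buf[0], static_cast<int>(streamSize), MPI_CHAR, 0, MPI_COMM_WORLD);
  std::string received(&buf[0]);  // every rank now holds the text

  // Element-wise sum across ranks, as used for the membership arrays:
  std::vector<int> membership(8, rank), summed(8, 0);
  MPI_Allreduce(&membership[0], &summed[0], 8, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  // The in-place form that the "This would be prettier if we could use
  // MPI_IN_PLACE" comment in the -877 hunk alludes to; the receive buffer
  // doubles as the send buffer, so no temporary is needed:
  MPI_Allreduce(MPI_IN_PLACE, &membership[0], 8, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}

The MPI_IN_PLACE variant has been legal since MPI-2 and would eliminate the tmpGroupMembership-style scratch vectors kept in the hunks above; the patch retains the two-buffer form, presumably for compatibility with the implementations it targets.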