# | Line 46 | Line 46 | |
46 | * @date 11/03/2004 | |
47 | * @version 1.0 | |
48 | */ | |
49 | + | |
50 | + | #ifdef IS_MPI |
51 | + | #include "mpi.h" |
52 | + | #include "math/ParallelRandNumGen.hpp" |
53 | + | #endif |
54 | + | |
55 | #include <exception> | |
56 | #include <iostream> | |
57 | #include <sstream> | |
# | Line 83 | Line 89 | |
89 | #include "types/FixedChargeAdapter.hpp" | |
90 | #include "types/FluctuatingChargeAdapter.hpp" | |
91 | ||
86 | – | #ifdef IS_MPI |
87 | – | #include "mpi.h" |
88 | – | #include "math/ParallelRandNumGen.hpp" |
89 | – | #endif |
92 | ||
93 | namespace OpenMD { | |
94 | ||
# | Line 101 | Line 103 | namespace OpenMD { | |
103 | const int masterNode = 0; | |
104 | ||
105 | if (worldRank == masterNode) { | |
106 | < | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
106 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
107 | > | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
108 | #endif | |
109 | SimplePreprocessor preprocessor; | |
110 | < | preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream); |
110 | > | preprocessor.preprocess(rawMetaDataStream, filename, |
111 | > | startOfMetaDataBlock, ppStream); |
112 | ||
113 | #ifdef IS_MPI | |
114 | < | //brocasting the stream size |
114 | > | //broadcasting the stream size |
115 | streamSize = ppStream.str().size() +1; | |
116 | < | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
117 | < | MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode); |
116 | > | MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD); |
117 | > | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), |
118 | > | streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
119 | > | |
120 | > | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
121 | > | // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), |
122 | > | // streamSize, MPI::CHAR, masterNode); |
123 | ||
124 | } else { | |
116 | – | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
125 | ||
126 | < | //get stream size |
127 | < | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
126 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
127 | > | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
128 | ||
129 | + | //get stream size |
130 | + | MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD); |
131 | + | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
132 | char* buf = new char[streamSize]; | |
133 | assert(buf); | |
134 | ||
135 | //receive file content | |
136 | < | MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); |
137 | < | |
136 | > | MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
137 | > | // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); |
138 | > | |
139 | ppStream.str(buf); | |
140 | delete [] buf; | |
141 | } | |
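The MPI C++ bindings used on the removed lines (MPI::COMM_WORLD.Bcast, MPI::INT, ...) were deprecated in MPI-2.2 and removed in MPI-3.0, which is presumably why this hunk rewrites the calls against the C API, where the communicator and the root rank are passed as explicit arguments. The size-then-contents pattern used to ship the preprocessed metadata stream can be sketched on its own roughly as follows; this is a minimal illustration with made-up names, not the SimCreator code:

```cpp
#include <mpi.h>
#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

// Broadcast a std::string from the root rank: first the length, then the
// characters. Mirrors the streamSize / ppStream pattern above, but with
// illustrative names; error handling is omitted for brevity.
std::string broadcastString(const std::string& src, int root) {
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  long size = (rank == root) ? static_cast<long>(src.size()) + 1 : 0;
  MPI_Bcast(&size, 1, MPI_LONG, root, MPI_COMM_WORLD);

  std::vector<char> buf(size);
  if (rank == root)
    std::copy(src.c_str(), src.c_str() + size, buf.begin());
  MPI_Bcast(buf.data(), static_cast<int>(size), MPI_CHAR, root, MPI_COMM_WORLD);

  return std::string(buf.data());
}

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  std::cout << broadcastString("preprocessed metadata", 0) << std::endl;
  MPI_Finalize();
  return 0;
}
```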
# | Line 147 | Line 159 | namespace OpenMD { | |
159 | parser.initializeASTFactory(factory); | |
160 | parser.setASTFactory(&factory); | |
161 | parser.mdfile(); | |
150 | – | |
162 | // Create a tree parser that reads information into Globals | |
163 | MDTreeParser treeParser; | |
164 | treeParser.initializeASTFactory(factory); | |
# | Line 518 | Line 529 | namespace OpenMD { | |
529 | // error | |
530 | // condition: | |
531 | ||
532 | < | nProcessors = MPI::COMM_WORLD.Get_size(); |
532 | > | MPI_Comm_size( MPI_COMM_WORLD, &nProcessors); |
533 | > | //nProcessors = MPI::COMM_WORLD.Get_size(); |
534 | ||
535 | if (nProcessors > nGlobalMols) { | |
536 | sprintf(painCave.errMsg, | |
# | Line 637 | Line 649 | namespace OpenMD { | |
649 | delete myRandom; | |
650 | ||
651 | // Spray out this nonsense to all other processors: | |
652 | < | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
652 | > | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
653 | > | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
654 | } else { | |
655 | ||
656 | // Listen to your marching orders from processor 0: | |
657 | < | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
657 | > | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
658 | > | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
659 | ||
660 | } | |
661 | ||
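MPI_Bcast is a collective operation: the root and the receiving ranks all issue the same call with the same arguments, and only the root argument determines who sends. That is why the two branches above end up containing identical MPI_Bcast lines. A small self-contained sketch of the same idea; the vector size and the assignment rule are made up for illustration:

```cpp
#include <mpi.h>
#include <cstdlib>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, nProcs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

  const int nMols = 8;                      // illustrative molecule count
  std::vector<int> molToProc(nMols, 0);

  if (rank == 0) {
    // Only rank 0 decides the molecule -> processor assignment ...
    for (int i = 0; i < nMols; ++i)
      molToProc[i] = std::rand() % nProcs;
  }

  // ... but every rank, root included, makes the identical collective call.
  MPI_Bcast(&molToProc[0], nMols, MPI_INT, 0, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```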
# | Line 801 | Line 815 | namespace OpenMD { | |
815 | Molecule::AtomIterator ai; | |
816 | Molecule::RigidBodyIterator ri; | |
817 | Molecule::CutoffGroupIterator ci; | |
818 | + | Molecule::BondIterator boi; |
819 | + | Molecule::BendIterator bei; |
820 | + | Molecule::TorsionIterator ti; |
821 | + | Molecule::InversionIterator ii; |
822 | Molecule::IntegrableObjectIterator ioi; | |
823 | < | Molecule * mol; |
824 | < | Atom * atom; |
825 | < | RigidBody * rb; |
826 | < | CutoffGroup * cg; |
823 | > | Molecule* mol; |
824 | > | Atom* atom; |
825 | > | RigidBody* rb; |
826 | > | CutoffGroup* cg; |
827 | > | Bond* bond; |
828 | > | Bend* bend; |
829 | > | Torsion* torsion; |
830 | > | Inversion* inversion; |
831 | int beginAtomIndex; | |
832 | int beginRigidBodyIndex; | |
833 | int beginCutoffGroupIndex; | |
834 | + | int beginBondIndex; |
835 | + | int beginBendIndex; |
836 | + | int beginTorsionIndex; |
837 | + | int beginInversionIndex; |
838 | int nGlobalAtoms = info->getNGlobalAtoms(); | |
839 | int nGlobalRigidBodies = info->getNGlobalRigidBodies(); | |
840 | ||
841 | beginAtomIndex = 0; | |
842 | < | //rigidbody's index begins right after atom's |
842 | > | // The rigid body indices begin immediately after the atom indices: |
843 | beginRigidBodyIndex = info->getNGlobalAtoms(); | |
844 | beginCutoffGroupIndex = 0; | |
845 | < | |
845 | > | beginBondIndex = 0; |
846 | > | beginBendIndex = 0; |
847 | > | beginTorsionIndex = 0; |
848 | > | beginInversionIndex = 0; |
849 | > | |
850 | for(int i = 0; i < info->getNGlobalMolecules(); i++) { | |
851 | ||
852 | #ifdef IS_MPI | |
# | Line 825 | Line 855 | namespace OpenMD { | |
855 | // stuff to do if I own this molecule | |
856 | mol = info->getMoleculeByGlobalIndex(i); | |
857 | ||
858 | < | //local index(index in DataStorge) of atom is important |
859 | < | for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) { |
858 | > | // The local index (index in DataStorage) of the atom is important:
859 | > | for(atom = mol->beginAtom(ai); atom != NULL; |
860 | > | atom = mol->nextAtom(ai)) { |
861 | atom->setGlobalIndex(beginAtomIndex++); | |
862 | } | |
863 | ||
# | Line 835 | Line 866 | namespace OpenMD { | |
866 | rb->setGlobalIndex(beginRigidBodyIndex++); | |
867 | } | |
868 | ||
869 | < | //local index of cutoff group is trivial, it only depends on |
870 | < | //the order of travesing |
869 | > | // The local index of other objects only depends on the order |
870 | > | // of traversal: |
871 | for(cg = mol->beginCutoffGroup(ci); cg != NULL; | |
872 | cg = mol->nextCutoffGroup(ci)) { | |
873 | cg->setGlobalIndex(beginCutoffGroupIndex++); | |
874 | } | |
875 | + | for(bond = mol->beginBond(boi); bond != NULL; |
876 | + | bond = mol->nextBond(boi)) { |
877 | + | bond->setGlobalIndex(beginBondIndex++); |
878 | + | } |
879 | + | for(bend = mol->beginBend(bei); bend != NULL; |
880 | + | bend = mol->nextBend(bei)) { |
881 | + | bend->setGlobalIndex(beginBendIndex++); |
882 | + | } |
883 | + | for(torsion = mol->beginTorsion(ti); torsion != NULL; |
884 | + | torsion = mol->nextTorsion(ti)) { |
885 | + | torsion->setGlobalIndex(beginTorsionIndex++); |
886 | + | } |
887 | + | for(inversion = mol->beginInversion(ii); inversion != NULL; |
888 | + | inversion = mol->nextInversion(ii)) { |
889 | + | inversion->setGlobalIndex(beginInversionIndex++); |
890 | + | } |
891 | ||
892 | #ifdef IS_MPI | |
893 | } else { | |
# | Line 853 | Line 900 | namespace OpenMD { | |
900 | beginAtomIndex += stamp->getNAtoms(); | |
901 | beginRigidBodyIndex += stamp->getNRigidBodies(); | |
902 | beginCutoffGroupIndex += stamp->getNCutoffGroups() + stamp->getNFreeAtoms(); | |
903 | + | beginBondIndex += stamp->getNBonds(); |
904 | + | beginBendIndex += stamp->getNBends(); |
905 | + | beginTorsionIndex += stamp->getNTorsions(); |
906 | + | beginInversionIndex += stamp->getNInversions(); |
907 | } | |
908 | #endif | |
909 | ||
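The new begin*Index counters follow the same bookkeeping trick as the existing atom and rigid-body counters: every rank iterates over all global molecules in the same order, hands out real indices only for the molecules it owns, and for everything else advances the counter by the count stored in the molecule stamp, so the numbering comes out identical on every rank without any communication. A toy model of that scheme for one object type (not OpenMD code; two simulated ranks):

```cpp
#include <cstdio>
#include <vector>

// Toy model of the global-index bookkeeping: both simulated "ranks" walk the
// same global molecule list; a rank assigns indices only for the molecules it
// owns and skips ahead by the stamp count otherwise, so the numbering agrees
// everywhere without communication.
int main() {
  std::vector<int> owner       = {0, 1, 1, 0};  // molecule -> owning rank
  std::vector<int> nObjsPerMol = {3, 2, 2, 3};  // e.g. bonds per molecule

  for (int rank = 0; rank < 2; ++rank) {
    int beginIndex = 0;
    for (std::size_t i = 0; i < owner.size(); ++i) {
      if (owner[i] == rank) {
        for (int j = 0; j < nObjsPerMol[i]; ++j)
          std::printf("rank %d: mol %zu, object %d -> global index %d\n",
                      rank, i, j, beginIndex++);
      } else {
        beginIndex += nObjsPerMol[i];  // another rank owns these indices
      }
    }
  }
  return 0;
}
```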
# | Line 860 | Line 911 | namespace OpenMD { | |
911 | ||
912 | //fill globalGroupMembership | |
913 | std::vector<int> globalGroupMembership(info->getNGlobalAtoms(), 0); | |
914 | < | for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) { |
915 | < | for (cg = mol->beginCutoffGroup(ci); cg != NULL; cg = mol->nextCutoffGroup(ci)) { |
916 | < | |
914 | > | for(mol = info->beginMolecule(mi); mol != NULL; |
915 | > | mol = info->nextMolecule(mi)) { |
916 | > | for (cg = mol->beginCutoffGroup(ci); cg != NULL; |
917 | > | cg = mol->nextCutoffGroup(ci)) { |
918 | for(atom = cg->beginAtom(ai); atom != NULL; atom = cg->nextAtom(ai)) { | |
919 | globalGroupMembership[atom->getGlobalIndex()] = cg->getGlobalIndex(); | |
920 | } | |
# | Line 877 | Line 929 | namespace OpenMD { | |
929 | // This would be prettier if we could use MPI_IN_PLACE like the MPI-2 | |
930 | // docs said we could. | |
931 | std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0); | |
932 | < | MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], |
933 | < | &tmpGroupMembership[0], nGlobalAtoms, |
934 | < | MPI::INT, MPI::SUM); |
932 | > | MPI_Allreduce(&globalGroupMembership[0], |
933 | > | &tmpGroupMembership[0], nGlobalAtoms, |
934 | > | MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
935 | > | // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], |
936 | > | // &tmpGroupMembership[0], nGlobalAtoms, |
937 | > | // MPI::INT, MPI::SUM); |
938 | info->setGlobalGroupMembership(tmpGroupMembership); | |
939 | #else | |
940 | info->setGlobalGroupMembership(globalGroupMembership); | |
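The MPI_SUM reduction works here because each rank writes group indices only into the slots of atoms it owns and leaves every other entry at zero, so summing the zero-padded partial arrays reconstructs the full membership table on every rank. The MPI_IN_PLACE form that the original comment wishes for does exist in the C API and removes the need for the temporary vector; a hedged sketch follows, with illustrative data, and whether in-place is appropriate for this particular code path is a separate question:

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, nProcs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

  const int nGlobalAtoms = 6;                   // illustrative
  std::vector<int> groupMembership(nGlobalAtoms, 0);

  // Each rank fills only the entries for atoms it "owns"; the rest stay 0.
  for (int i = rank; i < nGlobalAtoms; i += nProcs)
    groupMembership[i] = 100 + i;               // stand-in for a group index

  // Summing the zero-padded partial arrays rebuilds the full table on every
  // rank. MPI_IN_PLACE makes groupMembership both send and receive buffer,
  // so no separate temporary vector is needed.
  MPI_Allreduce(MPI_IN_PLACE, &groupMembership[0], nGlobalAtoms,
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);

  MPI_Finalize();
  return 0;
}
```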
# | Line 903 | Line 958 | namespace OpenMD { | |
958 | #ifdef IS_MPI | |
959 | std::vector<int> tmpMolMembership(info->getNGlobalAtoms() + | |
960 | info->getNGlobalRigidBodies(), 0); | |
961 | < | MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
962 | < | nGlobalAtoms + nGlobalRigidBodies, |
963 | < | MPI::INT, MPI::SUM); |
961 | > | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
962 | > | nGlobalAtoms + nGlobalRigidBodies, |
963 | > | MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
964 | > | // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
965 | > | // nGlobalAtoms + nGlobalRigidBodies, |
966 | > | // MPI::INT, MPI::SUM); |
967 | ||
968 | info->setGlobalMolMembership(tmpMolMembership); | |
969 | #else | |
# | Line 923 | Line 981 | namespace OpenMD { | |
981 | ||
982 | #ifdef IS_MPI | |
983 | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); | |
984 | < | MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
985 | < | info->getNGlobalMolecules(), MPI::INT, MPI::SUM); |
984 | > | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
985 | > | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
986 | > | // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
987 | > | // info->getNGlobalMolecules(), MPI::INT, MPI::SUM); |
988 | #else | |
989 | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; | |
990 | #endif |
Legend:
– | Removed lines
+ | Added lines
< | Changed lines (old version)
> | Changed lines (new version)