@@ -105,7 +105,6 @@ namespace OpenMD {
 
     if (worldRank == masterNode) {
       MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 #endif
       SimplePreprocessor preprocessor;
       preprocessor.preprocess(rawMetaDataStream, filename,
@@ -115,27 +114,19 @@ namespace OpenMD {
       //broadcasting the stream size
       streamSize = ppStream.str().size() +1;
       MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
+      MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
                 streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
-
-      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
-      // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
-      //                       streamSize, MPI::CHAR, masterNode);
-
     } else {
 
       MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 
       //get stream size
       MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
       char* buf = new char[streamSize];
       assert(buf);
 
       //receive file content
       MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
 
       ppStream.str(buf);
       delete [] buf;
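The two hunks above use a size-then-payload idiom: the master rank broadcasts the byte count first so the other ranks know how large a buffer to allocate, then broadcasts the bytes themselves. A minimal standalone sketch of that idiom with the C MPI API this changeset migrates to (variable names mirror the diff; the payload string is made up):

```cpp
#include <mpi.h>

#include <sstream>
#include <string>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int worldRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
  const int masterNode = 0;

  std::stringstream ppStream;
  int streamSize = 0;

  if (worldRank == masterNode) {
    ppStream << "molecule { ... }";          // stands in for the preprocessed meta-data
    streamSize = ppStream.str().size() + 1;  // +1 keeps the terminating NUL
    MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
    // The temporary string returned by str() lives until the full
    // expression completes, so handing its c_str() to a blocking
    // broadcast is safe:
    MPI_Bcast(const_cast<char*>(ppStream.str().c_str()),
              streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
  } else {
    MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
    char* buf = new char[streamSize];
    MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
    ppStream.str(buf);  // every rank now holds an identical stream
    delete[] buf;
  }

  MPI_Finalize();
  return 0;
}
```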
@@ -531,7 +522,6 @@ namespace OpenMD {
     // condition:
 
     MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
-    //nProcessors = MPI::COMM_WORLD.Get_size();
 
     if (nProcessors > nGlobalMols) {
       sprintf(painCave.errMsg,
@@ -651,12 +641,11 @@ namespace OpenMD {
 
       // Spray out this nonsense to all other processors:
       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
+
     } else {
 
       // Listen to your marching orders from processor 0:
       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
-      // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 
     }
 
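These two hunks follow a root-decides pattern: processor 0 builds molToProcMap (after the nProcessors > nGlobalMols sanity check above), then every rank participates in the same MPI_Bcast. A hedged sketch of the pattern, with a hypothetical helper and a round-robin assignment standing in for OpenMD's actual load-balancing logic:

```cpp
#include <mpi.h>

#include <vector>

// Hypothetical helper; OpenMD's real assignment weighs molecules by size.
std::vector<int> distributeMolecules(int nGlobalMols) {
  int worldRank, nProcessors;
  MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);

  std::vector<int> molToProcMap(nGlobalMols);

  if (worldRank == 0) {
    // Processor 0 decides where every molecule lives (round-robin here)...
    for (int i = 0; i < nGlobalMols; ++i)
      molToProcMap[i] = i % nProcessors;
    // ...then sprays the finished map out to all other processors:
    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
  } else {
    // Everyone else listens for marching orders from processor 0:
    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
  }
  return molToProcMap;
}
```

Since the buffer address, count, and root agree on every rank, a single unconditional MPI_Bcast would also work; the code keeps the if/else because rank 0 has extra work to do before broadcasting.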
@@ -800,6 +789,10 @@ namespace OpenMD {
       storageLayout |= DataStorage::dslElectricField;
     }
 
+    if (simParams->getOutputSitePotential() ) {
+      storageLayout |= DataStorage::dslSitePotential;
+    }
+
     if (simParams->getOutputFluctuatingCharges()) {
       storageLayout |= DataStorage::dslFlucQPosition;
       storageLayout |= DataStorage::dslFlucQVelocity;
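The added hunk wires the new site-potential output into the same bit-flag scheme the surrounding options use: each optional output ORs one bit into storageLayout. A small illustration of the idiom (the enum values here are invented for the sketch, not OpenMD's actual DataStorage constants):

```cpp
#include <cstdint>

// Illustrative flag values only; OpenMD defines its own dsl* constants.
enum : std::uint32_t {
  dslElectricField = 1u << 0,
  dslSitePotential = 1u << 1,
  dslFlucQPosition = 1u << 2,
  dslFlucQVelocity = 1u << 3,
};

std::uint32_t buildStorageLayout(bool wantField, bool wantSitePotential) {
  std::uint32_t storageLayout = 0;
  if (wantField) storageLayout |= dslElectricField;          // one bit per
  if (wantSitePotential) storageLayout |= dslSitePotential;  // optional output
  return storageLayout;
}
```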
@@ -933,9 +926,7 @@ namespace OpenMD {
     MPI_Allreduce(&globalGroupMembership[0],
                   &tmpGroupMembership[0], nGlobalAtoms,
                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
-    //                           &tmpGroupMembership[0], nGlobalAtoms,
-    //                           MPI::INT, MPI::SUM);
+
     info->setGlobalGroupMembership(tmpGroupMembership);
 #else
     info->setGlobalGroupMembership(globalGroupMembership);
@@ -962,9 +953,6 @@ namespace OpenMD {
     MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
                   nGlobalAtoms + nGlobalRigidBodies,
                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
-    //                           nGlobalAtoms + nGlobalRigidBodies,
-    //                           MPI::INT, MPI::SUM);
 
     info->setGlobalMolMembership(tmpMolMembership);
 #else
@@ -984,8 +972,6 @@ namespace OpenMD {
     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
     MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
                   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
-    // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
-    //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 #else
     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 #endif
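The three Allreduce hunks above all rely on the same merge idiom: each rank fills in only the entries of a global-length array that it owns, leaves every other entry zero, and an element-wise MPI_SUM reduction assembles the complete table on all ranks at once. A self-contained sketch of that idiom (the function name and ownership bookkeeping are hypothetical):

```cpp
#include <mpi.h>

#include <cstddef>
#include <vector>

// Hypothetical helper: myIndices/myValues describe the entries this
// rank owns; everything else stays 0 so MPI_SUM cannot double-count.
std::vector<int> mergeGlobalTable(int nGlobal,
                                  const std::vector<int>& myIndices,
                                  const std::vector<int>& myValues) {
  std::vector<int> partial(nGlobal, 0), merged(nGlobal, 0);
  for (std::size_t i = 0; i < myIndices.size(); ++i)
    partial[myIndices[i]] = myValues[i];  // fill only locally-owned slots

  // Element-wise sum assembles the full table on every rank at once:
  MPI_Allreduce(&partial[0], &merged[0], nGlobal,
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  return merged;  // identical contents on all ranks
}
```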