| 64 |  | #include "brains/ForceField.hpp" | 
| 65 |  | #include "utils/simError.h" | 
| 66 |  | #include "utils/StringUtils.hpp" | 
| 67 | + | #include "utils/Revision.hpp" | 
| 68 |  | #include "math/SeqRandNumGen.hpp" | 
| 69 |  | #include "mdParser/MDLexer.hpp" | 
| 70 |  | #include "mdParser/MDParser.hpp" | 
| 105 |  |  | 
| 106 |  | if (worldRank == masterNode) { | 
| 107 |  | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 107 | – | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 108 |  | #endif | 
| 109 |  | SimplePreprocessor preprocessor; | 
| 110 |  | preprocessor.preprocess(rawMetaDataStream, filename, | 
| 114 |  | // Broadcast the stream size | 
| 115 |  | streamSize = ppStream.str().size() +1; | 
| 116 |  | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 117 | < | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 117 | > | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 118 |  | streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 119 | – |  | 
| 120 | – | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 121 | – | // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 122 | – | //                       streamSize, MPI::CHAR, masterNode); | 
| 123 | – |  | 
| 119 |  | } else { | 
| 120 |  |  | 
| 121 |  | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 127 | – | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 122 |  |  | 
| 123 |  | // Get the stream size | 
| 124 |  | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 131 | – | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 125 |  | char* buf = new char[streamSize]; | 
| 126 |  | assert(buf); | 
| 127 |  |  | 
| 128 |  | // Receive the file content | 
| 129 |  | MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 137 | – | // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); | 
| 130 |  |  | 
| 131 |  | ppStream.str(buf); | 
| 132 |  | delete [] buf; | 
| 264 |  | version.append("."); | 
| 265 |  | version.append(OPENMD_VERSION_MINOR); | 
| 266 |  |  | 
| 267 | < | std::string svnrev; | 
| 267 | > | std::string svnrev(g_REVISION, strnlen(g_REVISION, 20)); | 
| 268 |  | // Convert a compiler-provided macro to a C++ string | 
| 269 | < | STR_DEFINE(svnrev, SVN_REV ); | 
| 269 | > | // STR_DEFINE(svnrev, SVN_REV ); | 
| 270 |  | version.append(" Revision: "); | 
| 271 |  | // If there's no SVN revision, just call this the RELEASE revision. | 
| 272 |  | if (!svnrev.empty()) { | 
| 522 |  | // condition: | 
| 523 |  |  | 
| 524 |  | MPI_Comm_size( MPI_COMM_WORLD, &nProcessors); | 
| 533 | – | //nProcessors = MPI::COMM_WORLD.Get_size(); | 
| 525 |  |  | 
| 526 |  | if (nProcessors > nGlobalMols) { | 
| 527 |  | sprintf(painCave.errMsg, | 
| 641 |  |  | 
| 642 |  | // Spray out this nonsense to all other processors: | 
| 643 |  | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 644 | < | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 644 | > |  | 
| 645 |  | } else { | 
| 646 |  |  | 
| 647 |  | // Listen to your marching orders from processor 0: | 
| 648 |  | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 658 | – | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 649 |  |  | 
| 650 |  | } | 
| 651 |  |  | 
| 922 |  | MPI_Allreduce(&globalGroupMembership[0], | 
| 923 |  | &tmpGroupMembership[0], nGlobalAtoms, | 
| 924 |  | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 925 | < | // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], | 
| 936 | < | //                           &tmpGroupMembership[0], nGlobalAtoms, | 
| 937 | < | //                           MPI::INT, MPI::SUM); | 
| 925 | > |  | 
| 926 |  | info->setGlobalGroupMembership(tmpGroupMembership); | 
| 927 |  | #else | 
| 928 |  | info->setGlobalGroupMembership(globalGroupMembership); | 
| 949 |  | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 950 |  | nGlobalAtoms + nGlobalRigidBodies, | 
| 951 |  | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 964 | – | // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 965 | – | //                           nGlobalAtoms + nGlobalRigidBodies, | 
| 966 | – | //                           MPI::INT, MPI::SUM); | 
| 952 |  |  | 
| 953 |  | info->setGlobalMolMembership(tmpMolMembership); | 
| 954 |  | #else | 
| 968 |  | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); | 
| 969 |  | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 970 |  | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 986 | – | // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 987 | – | //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM); | 
| 971 |  | #else | 
| 972 |  | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; | 
| 973 |  | #endif |