| 64 |  | #include "brains/ForceField.hpp" | 
| 65 |  | #include "utils/simError.h" | 
| 66 |  | #include "utils/StringUtils.hpp" | 
| 67 | + | #include "utils/Revision.hpp" | 
| 68 |  | #include "math/SeqRandNumGen.hpp" | 
| 69 |  | #include "mdParser/MDLexer.hpp" | 
| 70 |  | #include "mdParser/MDParser.hpp" | 
| 104 |  | const int masterNode = 0; | 
| 105 |  |  | 
| 106 |  | if (worldRank == masterNode) { | 
| 107 | < | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 107 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 108 |  | #endif | 
| 109 |  | SimplePreprocessor preprocessor; | 
| 110 | < | preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream); | 
| 110 | > | preprocessor.preprocess(rawMetaDataStream, filename, | 
| 111 | > | startOfMetaDataBlock, ppStream); | 
| 112 |  |  | 
| 113 |  | #ifdef IS_MPI | 
| 114 | < | //brocasting the stream size | 
| 114 | > | //broadcasting the stream size | 
| 115 |  | streamSize = ppStream.str().size() +1; | 
| 116 | < | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 117 | < | MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode); | 
| 118 | < |  | 
| 116 | > | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 117 | > | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 118 | > | streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 119 |  | } else { | 
| 118 | - | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 120 |  |  | 
| 121 | < | //get stream size | 
| 121 | < | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 121 | > | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 122 |  |  | 
| 123 | + | //get stream size | 
| 124 | + | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 125 |  | char* buf = new char[streamSize]; | 
| 126 |  | assert(buf); | 
| 127 |  |  | 
| 128 |  | //receive file content | 
| 129 | < | MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); | 
| 130 | < |  | 
| 129 | > | MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 130 | > |  | 
| 131 |  | ppStream.str(buf); | 
| 132 |  | delete [] buf; | 
| 133 |  | } | 
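Reviewer note: this hunk (and the smaller ones below) replaces the removed MPI C++ bindings with the plain C API, appending the communicator as the last argument; the size broadcast also switches from MPI::LONG to MPI_INT, presumably to match the int type of streamSize. A minimal, self-contained sketch of the same broadcast pattern follows; the function name and the payload variable are placeholders, not code from this changeset.

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Broadcast an int followed by a character buffer from the master rank.
    void broadcastMetaData(int worldRank) {
      const int masterNode = 0;
      int mdFileVersion = 0;
      std::string payload;                  // placeholder for ppStream.str()

      if (worldRank == masterNode) {
        mdFileVersion = 2;
        payload = "preprocessed metadata";
      }

      // Old bindings: MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
      MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);

      // Broadcast the buffer size first so the other ranks can allocate space.
      int streamSize = static_cast<int>(payload.size()) + 1;
      MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);

      std::vector<char> buf(streamSize, '\0');
      if (worldRank == masterNode) payload.copy(&buf[0], payload.size());
      MPI_Bcast(&buf[0], streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
    }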
| 151 |  | parser.initializeASTFactory(factory); | 
| 152 |  | parser.setASTFactory(&factory); | 
| 153 |  | parser.mdfile(); | 
| 152 | - |  | 
| 154 |  | // Create a tree parser that reads information into Globals | 
| 155 |  | MDTreeParser treeParser; | 
| 156 |  | treeParser.initializeASTFactory(factory); | 
| 228 |  | catch (OpenMDException& e) { | 
| 229 |  | sprintf(painCave.errMsg, | 
| 230 |  | "%s\n", | 
| 231 | < | e.getMessage().c_str()); | 
| 231 | > | e.what()); | 
| 232 |  | painCave.isFatal = 1; | 
| 233 |  | simError(); | 
| 234 |  | } | 
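Reviewer note: the handler now reports e.what() instead of e.getMessage().c_str(), i.e. it relies on the standard std::exception interface. A minimal sketch of an exception class that supports this call; the type below is hypothetical and is not OpenMD's actual OpenMDException.

    #include <exception>
    #include <string>

    // Hypothetical stand-in for an exception usable with what(); the real
    // OpenMDException may carry additional state.
    class ExampleException : public std::exception {
    public:
      explicit ExampleException(const std::string& msg) : message_(msg) {}
      virtual ~ExampleException() throw() {}
      virtual const char* what() const throw() { return message_.c_str(); }
    private:
      std::string message_;
    };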
| 264 |  | version.append("."); | 
| 265 |  | version.append(OPENMD_VERSION_MINOR); | 
| 266 |  |  | 
| 267 | < | std::string svnrev; | 
| 267 | > | std::string svnrev(g_REVISION, strnlen(g_REVISION, 20)); | 
| 268 |  | //convert a macro from compiler to a string in c++ | 
| 269 | < | STR_DEFINE(svnrev, SVN_REV ); | 
| 269 | > | // STR_DEFINE(svnrev, SVN_REV ); | 
| 270 |  | version.append(" Revision: "); | 
| 271 |  | // If there's no SVN revision, just call this the RELEASE revision. | 
| 272 |  | if (!svnrev.empty()) { | 
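Reviewer note: the revision string is now taken from g_REVISION (hence the new utils/Revision.hpp include) instead of expanding the SVN_REV macro through STR_DEFINE, and strnlen caps the copy at 20 characters; strnlen is a POSIX/C11 function, so <cstring> (or <string.h>) needs to be in scope. The declaration below is only a guess at what the generated header provides; the real files may look different.

    // Assumed shape of the generated revision files (names and value invented):
    // utils/Revision.hpp
    extern const char g_REVISION[];

    // utils/Revision.cpp, written by the build system at configure time
    const char g_REVISION[] = "r1994";   // placeholder revision identifier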
| 521 |  | // error | 
| 522 |  | // condition: | 
| 523 |  |  | 
| 524 | < | nProcessors = MPI::COMM_WORLD.Get_size(); | 
| 524 | > | MPI_Comm_size( MPI_COMM_WORLD, &nProcessors); | 
| 525 |  |  | 
| 526 |  | if (nProcessors > nGlobalMols) { | 
| 527 |  | sprintf(painCave.errMsg, | 
| 640 |  | delete myRandom; | 
| 641 |  |  | 
| 642 |  | // Spray out this nonsense to all other processors: | 
| 643 | < | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 643 | > | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 644 | > |  | 
| 645 |  | } else { | 
| 646 |  |  | 
| 647 |  | // Listen to your marching orders from processor 0: | 
| 648 | < | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 648 | > | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 649 |  |  | 
| 650 |  | } | 
| 651 |  |  | 
| 785 |  | } | 
| 786 |  | } | 
| 787 |  |  | 
| 788 | < | if (simParams->getOutputElectricField() | simParams->haveElectricField()) { | 
| 788 | > | if (simParams->getOutputElectricField() | | 
| 789 | > | simParams->haveElectricField() | simParams->haveUniformField() | | 
| 790 | > | simParams->haveUniformGradientStrength() | | 
| 791 | > | simParams->haveUniformGradientDirection1() | | 
| 792 | > | simParams->haveUniformGradientDirection2() ) { | 
| 793 |  | storageLayout |= DataStorage::dslElectricField; | 
| 794 |  | } | 
| 795 |  |  | 
| 796 | + | if (simParams->getOutputSitePotential() ) { | 
| 797 | + | storageLayout |= DataStorage::dslSitePotential; | 
| 798 | + | } | 
| 799 | + |  | 
| 800 |  | if (simParams->getOutputFluctuatingCharges()) { | 
| 801 |  | storageLayout |= DataStorage::dslFlucQPosition; | 
| 802 |  | storageLayout |= DataStorage::dslFlucQVelocity; | 
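Reviewer note: these hunks widen the set of conditions that switch on electric-field storage and add a site-potential slot. storageLayout is a bitmask assembled by OR-ing DataStorage::dsl* flags together; a simplified sketch of that pattern follows, with invented flag names and values that do not correspond to OpenMD's DataStorage definitions.

    // Illustrative bitmask-flag pattern only.
    enum StorageFlag {
      slPosition      = 1 << 0,
      slElectricField = 1 << 1,
      slSitePotential = 1 << 2
    };

    int buildStorageLayout(bool wantField, bool wantSitePotential) {
      int layout = slPosition;                    // always stored
      if (wantField)         layout |= slElectricField;
      if (wantSitePotential) layout |= slSitePotential;
      return layout;
    }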
| 927 |  | // This would be prettier if we could use MPI_IN_PLACE like the MPI-2 | 
| 928 |  | // docs said we could. | 
| 929 |  | std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0); | 
| 930 | < | MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], | 
| 931 | < | &tmpGroupMembership[0], nGlobalAtoms, | 
| 932 | < | MPI::INT, MPI::SUM); | 
| 930 | > | MPI_Allreduce(&globalGroupMembership[0], | 
| 931 | > | &tmpGroupMembership[0], nGlobalAtoms, | 
| 932 | > | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 933 | > |  | 
| 934 |  | info->setGlobalGroupMembership(tmpGroupMembership); | 
| 935 |  | #else | 
| 936 |  | info->setGlobalGroupMembership(globalGroupMembership); | 
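Reviewer note: the comment in this hunk already observes that MPI_IN_PLACE would make the reduction prettier; with an MPI-2 (or newer) implementation the temporary vector can indeed be dropped. A hedged sketch of that in-place alternative, which is not what the changeset does:

    #include <mpi.h>
    #include <vector>

    // Each rank fills in the entries it owns, then the sum is formed in place;
    // requires MPI-2 support, otherwise keep the explicit temporary copy.
    void reduceMembershipInPlace(std::vector<int>& membership) {
      MPI_Allreduce(MPI_IN_PLACE, &membership[0],
                    static_cast<int>(membership.size()),
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }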
| 954 |  | #ifdef IS_MPI | 
| 955 |  | std::vector<int> tmpMolMembership(info->getNGlobalAtoms() + | 
| 956 |  | info->getNGlobalRigidBodies(), 0); | 
| 957 | < | MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 958 | < | nGlobalAtoms + nGlobalRigidBodies, | 
| 959 | < | MPI::INT, MPI::SUM); | 
| 957 | > | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 958 | > | nGlobalAtoms + nGlobalRigidBodies, | 
| 959 | > | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 960 |  |  | 
| 961 |  | info->setGlobalMolMembership(tmpMolMembership); | 
| 962 |  | #else | 
| 974 |  |  | 
| 975 |  | #ifdef IS_MPI | 
| 976 |  | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); | 
| 977 | < | MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 978 | < | info->getNGlobalMolecules(), MPI::INT, MPI::SUM); | 
| 977 | > | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 978 | > | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 979 |  | #else | 
| 980 |  | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; | 
| 981 |  | #endif |