| 105 |  |  | 
| 106 |  | if (worldRank == masterNode) { | 
| 107 |  | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 108 | – | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 108 |  | #endif | 
| 109 |  | SimplePreprocessor preprocessor; | 
| 110 |  | preprocessor.preprocess(rawMetaDataStream, filename, | 
| 114 |  | //broadcasting the stream size | 
| 115 |  | streamSize = ppStream.str().size() +1; | 
| 116 |  | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 117 | < | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 117 | > | MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 118 |  | streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 120 | – |  | 
| 121 | – | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 122 | – | // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), | 
| 123 | – | //                       streamSize, MPI::CHAR, masterNode); | 
| 124 | – |  | 
| 119 |  | } else { | 
| 120 |  |  | 
| 121 |  | MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 128 | – | // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); | 
| 122 |  |  | 
| 123 |  | //get stream size | 
| 124 |  | MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); | 
| 132 | – | // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); | 
| 125 |  | char* buf = new char[streamSize]; | 
| 126 |  | assert(buf); | 
| 127 |  |  | 
| 128 |  | //receive file content | 
| 129 |  | MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); | 
| 138 | – | // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); | 
| 130 |  |  | 
| 131 |  | ppStream.str(buf); | 
| 132 |  | delete [] buf; | 
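Editor's note: the hunk above swaps the deprecated C++ MPI bindings for the C API when shipping the preprocessed metadata from the master node to the other ranks. A minimal sketch of the same size-then-contents broadcast pattern follows; the function name and argument list are illustrative, not taken from SimCreator:

    #include <mpi.h>
    #include <cassert>
    #include <sstream>

    // The master sends the stream length first so receivers can size a buffer,
    // then the characters themselves; receivers rebuild the stringstream.
    void broadcastStream(std::stringstream& ppStream, int masterNode) {
      int worldRank;
      MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

      int streamSize;
      if (worldRank == masterNode) {
        streamSize = ppStream.str().size() + 1;   // +1 keeps the terminating '\0'
        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
        MPI_Bcast(const_cast<char*>(ppStream.str().c_str()),
                  streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
      } else {
        MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
        char* buf = new char[streamSize];
        assert(buf);
        MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
        ppStream.str(buf);                        // rebuild the stream on this rank
        delete [] buf;
      }
    }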
| 228 |  | catch (OpenMDException& e) { | 
| 229 |  | sprintf(painCave.errMsg, | 
| 230 |  | "%s\n", | 
| 231 | < | e.getMessage().c_str()); | 
| 231 | > | e.what()); | 
| 232 |  | painCave.isFatal = 1; | 
| 233 |  | simError(); | 
| 234 |  | } | 
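Editor's note: replacing e.getMessage().c_str() with e.what() suggests the exception type now exposes the standard std::exception interface. A hedged sketch of such a class is below; this is an assumption about its shape, not the actual OpenMD definition:

    #include <exception>
    #include <string>

    // Hypothetical exception carrying its message through what(), so call
    // sites no longer need a separate getMessage() accessor.
    class OpenMDException : public std::exception {
    public:
      explicit OpenMDException(const std::string& msg) : message_(msg) {}
      virtual ~OpenMDException() throw() {}
      virtual const char* what() const throw() { return message_.c_str(); }
    private:
      std::string message_;
    };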
| 522 |  | // condition: | 
| 523 |  |  | 
| 524 |  | MPI_Comm_size( MPI_COMM_WORLD, &nProcessors); | 
| 534 | – | //nProcessors = MPI::COMM_WORLD.Get_size(); | 
| 525 |  |  | 
| 526 |  | if (nProcessors > nGlobalMols) { | 
| 527 |  | sprintf(painCave.errMsg, | 
| 641 |  |  | 
| 642 |  | // Spray out this nonsense to all other processors: | 
| 643 |  | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 644 | < | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 644 | > |  | 
| 645 |  | } else { | 
| 646 |  |  | 
| 647 |  | // Listen to your marching orders from processor 0: | 
| 648 |  | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); | 
| 659 | – | // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); | 
| 649 |  |  | 
| 650 |  | } | 
| 651 |  |  | 
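Editor's note: both branches above end in the same MPI_Bcast call, since rank 0 fills molToProcMap and every other rank receives it. A short sketch of that decide-on-root, broadcast-to-all pattern; the round-robin assignment is only a stand-in for the real distribution logic:

    #include <mpi.h>
    #include <vector>

    // Rank 0 assigns each molecule to a processor, then everyone gets the map.
    std::vector<int> buildMolToProcMap(int nGlobalMols) {
      int myRank, nProcessors;
      MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
      MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);

      std::vector<int> molToProcMap(nGlobalMols, 0);
      if (myRank == 0) {
        for (int i = 0; i < nGlobalMols; ++i)
          molToProcMap[i] = i % nProcessors;      // placeholder assignment
      }
      // identical call on every rank: root sends, all others receive
      MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
      return molToProcMap;
    }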
| 789 |  | storageLayout |= DataStorage::dslElectricField; | 
| 790 |  | } | 
| 791 |  |  | 
| 792 | + | if (simParams->getOutputSitePotential() ) { | 
| 793 | + | storageLayout |= DataStorage::dslSitePotential; | 
| 794 | + | } | 
| 795 | + |  | 
| 796 |  | if (simParams->getOutputFluctuatingCharges()) { | 
| 797 |  | storageLayout |= DataStorage::dslFlucQPosition; | 
| 798 |  | storageLayout |= DataStorage::dslFlucQVelocity; | 
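Editor's note: the added dslSitePotential block follows the existing idiom of OR-ing bit flags into storageLayout. A brief sketch of how such flags combine; the enumerator values here are illustrative and may differ from the actual DataStorage constants:

    // Illustrative flag values; the real constants belong to DataStorage.
    enum StorageBit {
      dslPosition      = 1 << 0,
      dslElectricField = 1 << 1,
      dslSitePotential = 1 << 2,
      dslFlucQPosition = 1 << 3,
      dslFlucQVelocity = 1 << 4
    };

    int chooseStorageLayout(bool wantSitePotential, bool wantFlucQ) {
      int storageLayout = dslPosition;             // always keep positions
      if (wantSitePotential)
        storageLayout |= dslSitePotential;         // adds one bit, leaves the rest intact
      if (wantFlucQ)
        storageLayout |= dslFlucQPosition | dslFlucQVelocity;
      return storageLayout;
    }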
| 926 |  | MPI_Allreduce(&globalGroupMembership[0], | 
| 927 |  | &tmpGroupMembership[0], nGlobalAtoms, | 
| 928 |  | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 929 | < | // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], | 
| 937 | < | //                           &tmpGroupMembership[0], nGlobalAtoms, | 
| 938 | < | //                           MPI::INT, MPI::SUM); | 
| 929 | > |  | 
| 930 |  | info->setGlobalGroupMembership(tmpGroupMembership); | 
| 931 |  | #else | 
| 932 |  | info->setGlobalGroupMembership(globalGroupMembership); | 
| 953 |  | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 954 |  | nGlobalAtoms + nGlobalRigidBodies, | 
| 955 |  | MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 965 | – | // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], | 
| 966 | – | //                           nGlobalAtoms + nGlobalRigidBodies, | 
| 967 | – | //                           MPI::INT, MPI::SUM); | 
| 956 |  |  | 
| 957 |  | info->setGlobalMolMembership(tmpMolMembership); | 
| 958 |  | #else | 
| 972 |  | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); | 
| 973 |  | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 974 |  | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); | 
| 987 | – | // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], | 
| 988 | – | //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM); | 
| 975 |  | #else | 
| 976 |  | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; | 
| 977 |  | #endif |
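Editor's note: the three Allreduce hunks above share one idiom: each processor fills only the entries it owns in a zero-initialized, global-length vector, and a sum reduction assembles the complete table on every rank. A minimal sketch, assuming each rank owns a contiguous slice; names are illustrative:

    #include <mpi.h>
    #include <cstddef>
    #include <vector>

    // Merge zero-padded partial vectors into a full copy on every rank.
    std::vector<int> assembleGlobalTable(int nGlobalEntries, int firstOwned,
                                         const std::vector<int>& ownedValues) {
      std::vector<int> partial(nGlobalEntries, 0);
      for (std::size_t i = 0; i < ownedValues.size(); ++i)
        partial[firstOwned + i] = ownedValues[i];

      std::vector<int> full(nGlobalEntries, 0);
      MPI_Allreduce(&partial[0], &full[0], nGlobalEntries,
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      return full;
    }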