# Line 1 | Line 1
1       /*
2   <   * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
2   >   * copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
3       *
4       * The University of Notre Dame grants you ("Licensee") a
5       * non-exclusive, royalty free, license to use, modify and
# Line 100 | Line 100 | namespace OpenMD {
100       #ifdef IS_MPI
101       int streamSize;
102       const int masterNode = 0;
103   <   int commStatus;
103   >
104       if (worldRank == masterNode) {
105   <   commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
105   >   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
106       #endif
107       SimplePreprocessor preprocessor;
108   <   preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
108   >   preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock,
109   >                           ppStream);
110
111       #ifdef IS_MPI
112       // broadcasting the stream size
113       streamSize = ppStream.str().size() +1;
114   <   commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
115   <
116   <   commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
116   <
114   >   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
115   >   MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
116   >                         streamSize, MPI::CHAR, masterNode);
117
118       } else {
119
120   <   commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
120   >   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
121
122       //get stream size
123   <   commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
123   >   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
124
125       char* buf = new char[streamSize];
126       assert(buf);
127
128       //receive file content
129   <   commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
129   >   MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
130
131       ppStream.str(buf);
132       delete [] buf;
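The hunk above replaces the C-style MPI_Bcast calls with the MPI-2 C++ bindings: the master rank broadcasts the preprocessed metadata stream's size and then its contents, while every other rank posts matching Bcast calls to receive them. Below is a minimal standalone sketch of that pattern, not OpenMD code; the payload and names are invented, and it assumes an MPI library that still ships the C++ bindings (deprecated in MPI-2.2 and removed in MPI-3.0).

```cpp
// Hypothetical standalone sketch of the broadcast-a-string pattern above
// (not part of SimCreator.cpp).
#include <mpi.h>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char* argv[]) {
  MPI::Init(argc, argv);
  const int masterNode = 0;
  int rank = MPI::COMM_WORLD.Get_rank();

  std::stringstream ppStream;   // stand-in for the preprocessor output
  int streamSize;

  if (rank == masterNode) {
    ppStream << "component { type = \"He\"; nMol = 10; }";  // invented payload
    streamSize = ppStream.str().size() + 1;                 // +1 for the '\0'
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    MPI::COMM_WORLD.Bcast(const_cast<char*>(ppStream.str().c_str()),
                          streamSize, MPI::CHAR, masterNode);
  } else {
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    char* buf = new char[streamSize];
    MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
    ppStream.str(buf);
    delete [] buf;
  }

  std::cout << "rank " << rank << " received: " << ppStream.str() << std::endl;
  MPI::Finalize();
  return 0;
}
```

The sketch broadcasts the size as MPI::INT because streamSize is declared as an int; the hunk above passes MPI::LONG for the same variable.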
# Line 499 | Line 499 | namespace OpenMD {
499       int nTarget;
500       int done;
501       int i;
502   –   int j;
502       int loops;
503       int which_proc;
504       int nProcessors;
# Line 507 | Line 506 | namespace OpenMD {
506       int nGlobalMols = info->getNGlobalMolecules();
507       std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
508
509   <   MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
509   >   nProcessors = MPI::COMM_WORLD.Get_size();
510
511       if (nProcessors > nGlobalMols) {
512       sprintf(painCave.errMsg,
# Line 622 | Line 621 | namespace OpenMD {
621       delete myRandom;
622
623       // Spray out this nonsense to all other processors:
624   <
626   <   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
624   >   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
625       } else {
626
627       // Listen to your marching orders from processor 0:
628   <
631   <   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
628   >   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
629       }
630
631       info->setMolToProcMap(molToProcMap);
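Both branches of the hunk above now issue the same MPI::COMM_WORLD.Bcast on molToProcMap: rank 0 fills the map and broadcasts it, and every other rank joins the same collective to receive it. A minimal sketch of that idea follows, with an invented round-robin assignment standing in for OpenMD's weighted molecule division; all names and sizes are illustrative only.

```cpp
// Hypothetical standalone sketch (not OpenMD code): rank 0 assigns molecules
// to processors and broadcasts the map to everyone else.
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char* argv[]) {
  MPI::Init(argc, argv);
  int worldRank   = MPI::COMM_WORLD.Get_rank();
  int nProcessors = MPI::COMM_WORLD.Get_size();

  const int nGlobalMols = 8;                       // invented molecule count
  std::vector<int> molToProcMap(nGlobalMols, -1);  // -1 marks "unassigned"

  if (worldRank == 0) {
    for (int i = 0; i < nGlobalMols; ++i)
      molToProcMap[i] = i % nProcessors;           // placeholder assignment
    // Spray the finished map out to all other processors:
    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
  } else {
    // Listen for the map from processor 0:
    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
  }

  if (worldRank == 0)
    for (int i = 0; i < nGlobalMols; ++i)
      std::cout << "molecule " << i << " -> processor " << molToProcMap[i] << "\n";

  MPI::Finalize();
  return 0;
}
```

Since both branches make the identical collective call, it could equally sit outside the if/else; the split here simply mirrors the structure of the surrounding code.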
# Line 852 | Line 849 | namespace OpenMD {
849       // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
850       // docs said we could.
851       std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
852   <   MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
853   <                 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
852   >   MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
853   >                             &tmpGroupMembership[0], nGlobalAtoms,
854   >                             MPI::INT, MPI::SUM);
855       info->setGlobalGroupMembership(tmpGroupMembership);
856       #else
857       info->setGlobalGroupMembership(globalGroupMembership);
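The Allreduce hunks here and below all use the trick described in the comment: because the code avoids MPI_IN_PLACE, each rank contributes an array that is zero except for the entries it owns, and the element-wise MPI::SUM reduction assembles the complete global array on every rank. A small sketch of that technique with the C++ bindings follows; the names and sizes are invented, and it again assumes an MPI library that still provides the C++ bindings.

```cpp
// Hypothetical standalone sketch (not OpenMD code): assembling a global array
// from per-rank pieces with a summing Allreduce instead of MPI_IN_PLACE.
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char* argv[]) {
  MPI::Init(argc, argv);
  int rank  = MPI::COMM_WORLD.Get_rank();
  int nproc = MPI::COMM_WORLD.Get_size();

  const int atomsPerRank = 4;                       // invented sizes
  const int nGlobalAtoms = atomsPerRank * nproc;

  std::vector<int> localMembership(nGlobalAtoms, 0);   // zero except our slice
  std::vector<int> globalMembership(nGlobalAtoms, 0);

  // Each rank tags only the atoms it owns with a (made-up) group id:
  for (int i = rank * atomsPerRank; i < (rank + 1) * atomsPerRank; ++i)
    localMembership[i] = rank + 1;

  // The element-wise sum over all ranks reconstructs the full array everywhere:
  MPI::COMM_WORLD.Allreduce(&localMembership[0], &globalMembership[0],
                            nGlobalAtoms, MPI::INT, MPI::SUM);

  if (rank == 0)
    for (int i = 0; i < nGlobalAtoms; ++i)
      std::cout << "atom " << i << " -> group " << globalMembership[i] << "\n";

  MPI::Finalize();
  return 0;
}
```

The same pattern reappears in the following hunks for the molecule membership and the per-molecule integrable-object counts.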
# Line 870 | Line 868 | namespace OpenMD {
868
869       #ifdef IS_MPI
870       std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
871   <
872   <   MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
873   <                 MPI_INT, MPI_SUM, MPI_COMM_WORLD);
871   >   MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
872   >                             nGlobalAtoms,
873   >                             MPI::INT, MPI::SUM);
874
875       info->setGlobalMolMembership(tmpMolMembership);
876       #else
# Line 889 | Line 887 | namespace OpenMD {
887
888       #ifdef IS_MPI
889       std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
890   <   MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
891   <                 info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
890   >   MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
891   >                             info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
892       #else
893       std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
894       #endif
# Line 920 | Line 918 | namespace OpenMD {
918       }
919
920       void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
923   –   Globals* simParams;
921
925   –   simParams = info->getSimParams();
926   –
922       DumpReader reader(info, mdFileName);
923       int nframes = reader.getNFrames();
924
Legend:
–   Removed lines
+   Added lines
<   Changed lines (original)
>   Changed lines (modified)