| 64 |    #include "brains/ForceField.hpp"
| 65 |    #include "utils/simError.h"
| 66 |    #include "utils/StringUtils.hpp"
| 67 | +  #include "utils/Revision.hpp"
| 68 |    #include "math/SeqRandNumGen.hpp"
| 69 |    #include "mdParser/MDLexer.hpp"
| 70 |    #include "mdParser/MDParser.hpp"
| 104 |    const int masterNode = 0;
| 105 |
| 106 |    if (worldRank == masterNode) {
| 107 | <    MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 107 | >    MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 108 | >    // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 109 |    #endif
| 110 |      SimplePreprocessor preprocessor;
| 111 | <    preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
| 111 | >    preprocessor.preprocess(rawMetaDataStream, filename,
| 112 | >                            startOfMetaDataBlock, ppStream);
| 113 |
| 114 |    #ifdef IS_MPI
| 115 | <    //brocasting the stream size
| 115 | >    //broadcasting the stream size
| 116 |      streamSize = ppStream.str().size() +1;
| 117 | <    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 118 | <    MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
| 117 | >    MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 118 | >    MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
| 119 | >              streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 120 | >
| 121 | >    // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 122 | >    // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
| 123 | >    //                       streamSize, MPI::CHAR, masterNode);
| 124 |
| 125 |    } else {
| 118 | -    MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 126 |
| 127 | <    //get stream size
| 128 | <    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 127 | >    MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 128 | >    // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 129 |
| 130 | +    //get stream size
| 131 | +    MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 132 | +    // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 133 |      char* buf = new char[streamSize];
| 134 |      assert(buf);
| 135 |
| 136 |      //receive file content
| 137 | <    MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
| 138 | <
| 137 | >    MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 138 | >    // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
| 139 | >
| 140 |      ppStream.str(buf);
| 141 |      delete [] buf;
| 142 |    }
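
This hunk is the usual two-phase pattern for broadcasting variable-length data: the master broadcasts the stream size first, then the bytes, while every other rank allocates a matching buffer between the two calls. It also quietly fixes a datatype mismatch, broadcasting streamSize as MPI_INT rather than the old MPI::LONG. A minimal self-contained sketch of the same pattern with the C bindings (broadcastString and its parameters are illustrative, not names from this changeset):

    #include <mpi.h>
    #include <algorithm>
    #include <string>
    #include <vector>

    // Returns the root's string on every rank: size first, then bytes.
    std::string broadcastString(const std::string& s, int rootRank) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int size = static_cast<int>(s.size()) + 1;   // include the '\0'
      MPI_Bcast(&size, 1, MPI_INT, rootRank, MPI_COMM_WORLD);

      std::vector<char> buf(size);
      if (rank == rootRank)
        std::copy(s.c_str(), s.c_str() + size, buf.begin());
      MPI_Bcast(&buf[0], size, MPI_CHAR, rootRank, MPI_COMM_WORLD);

      return std::string(&buf[0]);
    }

A shared scratch buffer like this would also avoid the const_cast on ppStream.str().c_str(); that cast is only safe because the root merely reads from the temporary during the broadcast.
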
| 160 |    parser.initializeASTFactory(factory);
| 161 |    parser.setASTFactory(&factory);
| 162 |    parser.mdfile();
| 152 | -
| 163 |    // Create a tree parser that reads information into Globals
| 164 |    MDTreeParser treeParser;
| 165 |    treeParser.initializeASTFactory(factory);
| 273 |    version.append(".");
| 274 |    version.append(OPENMD_VERSION_MINOR);
| 275 |
| 276 | <  std::string svnrev;
| 276 | >  std::string svnrev(g_REVISION, strnlen(g_REVISION, 20));
| 277 |    //convert a macro from compiler to a string in c++
| 278 | <  STR_DEFINE(svnrev, SVN_REV );
| 278 | >  // STR_DEFINE(svnrev, SVN_REV );
| 279 |    version.append(" Revision: ");
| 280 |    // If there's no SVN revision, just call this the RELEASE revision.
| 281 |    if (!svnrev.empty()) {
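
The revision string now comes from a g_REVISION symbol (with strnlen capping the copy at 20 characters in case the string is unterminated) instead of stringifying the SVN_REV macro at compile time. The new utils/Revision.hpp include above presumably provides it; a hypothetical shape for a generated header/source pair (everything except g_REVISION is a guess):

    // utils/Revision.hpp -- assumed to be generated at build time
    #ifndef UTILS_REVISION_HPP
    #define UTILS_REVISION_HPP
    extern const char g_REVISION[];   // e.g. "1957"
    #endif

    // utils/Revision.cpp -- assumed
    #include "utils/Revision.hpp"
    const char g_REVISION[] = "@SVN_REV@";  // placeholder filled in by the build system
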
| 530 |    // error
| 531 |    // condition:
| 532 |
| 533 | <  nProcessors = MPI::COMM_WORLD.Get_size();
| 533 | >  MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
| 534 | >  //nProcessors = MPI::COMM_WORLD.Get_size();
| 535 |
| 536 |    if (nProcessors > nGlobalMols) {
| 537 |      sprintf(painCave.errMsg,
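
This hunk shows the general shape of the translation used throughout: the C++ bindings return their result, while the C bindings write it through an out-parameter and return an error code. Like most MPI code, this relies on the default MPI_ERRORS_ARE_FATAL handler and ignores the return value; checking it explicitly would look like this (illustrative only):

    int nProcessors;
    int rc = MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
    if (rc != MPI_SUCCESS) {
      // only reachable if the communicator's handler is MPI_ERRORS_RETURN
      MPI_Abort(MPI_COMM_WORLD, rc);
    }
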
| 650 |      delete myRandom;
| 651 |
| 652 |      // Spray out this nonsense to all other processors:
| 653 | <    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 653 | >    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 654 | >    // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 655 |    } else {
| 656 |
| 657 |      // Listen to your marching orders from processor 0:
| 658 | <    MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 658 | >    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 659 | >    // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 660 |
| 661 |    }
| 662 |
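
After the migration the two branches issue an identical MPI_Bcast: the call is symmetric, with the root's buffer read and every other rank's written, so only the filling of molToProcMap before the broadcast actually differs. The call could therefore be hoisted out of the if/else, e.g.:

    if (worldRank == 0) {
      // ... master fills molToProcMap ...
    }
    // same line on every rank; rank 0 sends, the rest receive
    MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
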
| 930 |    // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
| 931 |    // docs said we could.
| 932 |    std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
| 933 | <  MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
| 934 | <                            &tmpGroupMembership[0], nGlobalAtoms,
| 935 | <                            MPI::INT, MPI::SUM);
| 933 | >  MPI_Allreduce(&globalGroupMembership[0],
| 934 | >                &tmpGroupMembership[0], nGlobalAtoms,
| 935 | >                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 936 | >  // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
| 937 | >  //                           &tmpGroupMembership[0], nGlobalAtoms,
| 938 | >  //                           MPI::INT, MPI::SUM);
| 939 |    info->setGlobalGroupMembership(tmpGroupMembership);
| 940 |  #else
| 941 |    info->setGlobalGroupMembership(globalGroupMembership);
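
The comment at line 930 refers to MPI_IN_PLACE, which would make the tmpGroupMembership scratch vector unnecessary: each rank's partial contributions are summed directly into its own buffer. Assuming the target MPI implementations accept it, the "prettier" version would be:

    // Each rank holds its own entries in globalGroupMembership (zeros
    // elsewhere); the in-place sum leaves the full map on every rank.
    MPI_Allreduce(MPI_IN_PLACE, &globalGroupMembership[0], nGlobalAtoms,
                  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    info->setGlobalGroupMembership(globalGroupMembership);

The same simplification would apply to the two scratch-buffer Allreduce hunks below.
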
| 959 |  #ifdef IS_MPI
| 960 |    std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
| 961 |                                      info->getNGlobalRigidBodies(), 0);
| 962 | <  MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 963 | <                            nGlobalAtoms + nGlobalRigidBodies,
| 964 | <                            MPI::INT, MPI::SUM);
| 962 | >  MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 963 | >                nGlobalAtoms + nGlobalRigidBodies,
| 964 | >                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 965 | >  // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 966 | >  //                           nGlobalAtoms + nGlobalRigidBodies,
| 967 | >  //                           MPI::INT, MPI::SUM);
| 968 |
| 969 |    info->setGlobalMolMembership(tmpMolMembership);
| 970 |  #else
| 982 |
| 983 |  #ifdef IS_MPI
| 984 |    std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
| 985 | <  MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 986 | <                            info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
| 985 | >  MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 986 | >                info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 987 | >  // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 988 | >  //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
| 989 |  #else
| 990 |    std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
| 991 |  #endif
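
Taken together, this changeset is a mechanical migration off the MPI C++ bindings, which were deprecated in MPI-2.2 and removed in MPI-3.0, onto the C API, with each old call preserved as a comment next to its replacement. The recurring translation is:

    MPI::COMM_WORLD.Bcast(buf, n, MPI::INT, root)   ->  MPI_Bcast(buf, n, MPI_INT, root, MPI_COMM_WORLD)
    MPI::COMM_WORLD.Allreduce(in, out, n,
                              MPI::INT, MPI::SUM)   ->  MPI_Allreduce(in, out, n, MPI_INT, MPI_SUM,
                                                                      MPI_COMM_WORLD)
    nProc = MPI::COMM_WORLD.Get_size()              ->  MPI_Comm_size(MPI_COMM_WORLD, &nProc)
    MPI::INT / MPI::LONG / MPI::CHAR                ->  MPI_INT / MPI_LONG / MPI_CHAR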