 255     std::string mdRawData;
 256     int metaDataBlockStart = -1;
 257     int metaDataBlockEnd = -1;
 258  <  int i;
 258  >  int i, j;
 259     streamoff mdOffset(0);
 260     int mdFileVersion;
 261
 262  +  // Create a string for embedding the version information in the MetaData
 263  +  std::string version;
 264  +  version.assign("## Last run using OpenMD Version: ");
 265  +  version.append(OPENMD_VERSION_MAJOR);
 266  +  version.append(".");
 267  +  version.append(OPENMD_VERSION_MINOR);
 268
 269  +  std::string svnrev;
 270  +  //convert a macro from compiler to a string in c++
 271  +  STR_DEFINE(svnrev, SVN_REV );
 272  +  version.append(" Revision: ");
 273  +  // If there's no SVN revision, just call this the RELEASE revision.
 274  +  if (!svnrev.empty()) {
 275  +    version.append(svnrev);
 276  +  } else {
 277  +    version.append("RELEASE");
 278  +  }
 279  +
 280     #ifdef IS_MPI
 281     const int masterNode = 0;
 282     if (worldRank == masterNode) {

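The added block above stringizes the compiler-supplied SVN_REV macro via STR_DEFINE, whose definition is not part of this changeset. As a rough sketch only, the usual two-step preprocessor stringification idiom such a helper is built on looks like the following (the helper names are illustrative, not OpenMD's; the real macro presumably also yields an empty string when SVN_REV is undefined, since the code goes on to test svnrev.empty()):

    // Hypothetical sketch; not OpenMD's actual definition.
    // The extra level of indirection forces SVN_REV to be macro-expanded
    // before it is turned into a string literal.
    #define STRINGIFY_HELPER(x) #x
    #define STRINGIFY(x) STRINGIFY_HELPER(x)
    #define STR_DEFINE(var, macro) (var) = STRINGIFY(macro)

    // With e.g. -DSVN_REV=1577 on the compile line:
    //   std::string svnrev;
    //   STR_DEFINE(svnrev, SVN_REV);   // svnrev == "1577"
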
 371
 372     mdRawData.clear();
 373
 374  +  bool foundVersion = false;
 375  +
 376     for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
 377       mdFile_.getline(buffer, bufferSize);
 378  <    mdRawData += buffer;
 378  >    std::string line = trimLeftCopy(buffer);
 379  >    j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
 380  >    if (static_cast<size_t>(j) != string::npos) {
 381  >      foundVersion = true;
 382  >      mdRawData += version;
 383  >    } else {
 384  >      mdRawData += buffer;
 385  >    }
 386       mdRawData += "\n";
 387     }
 388  <
 388  >
 389  >  if (!foundVersion) mdRawData += version + "\n";
 390  >
 391     mdFile_.close();
 392
 393     #ifdef IS_MPI

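The rewritten loop above leans on OpenMD's string helpers trimLeftCopy and CaseInsensitiveFind, which are not shown in this changeset. For orientation, a hypothetical sketch of a case-insensitive substring search in that spirit (name, signature, and return convention assumed, not taken from OpenMD):

    // Sketch only: returns the position of needle in haystack, ignoring case,
    // or std::string::npos if it is absent.
    #include <algorithm>
    #include <cctype>
    #include <string>

    struct CaseInsensitiveCharEqual {
      bool operator()(char a, char b) const {
        return std::toupper(static_cast<unsigned char>(a)) ==
               std::toupper(static_cast<unsigned char>(b));
      }
    };

    std::size_t caseInsensitiveFind(const std::string& haystack,
                                    const std::string& needle) {
      std::string::const_iterator it =
          std::search(haystack.begin(), haystack.end(),
                      needle.begin(), needle.end(), CaseInsensitiveCharEqual());
      return (it == haystack.end()) ? std::string::npos
                                    : static_cast<std::size_t>(it - haystack.begin());
    }
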
 534     int nGlobalMols = info->getNGlobalMolecules();
 535     std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
 536
 537  <  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
 537  >  nProcessors = MPI::COMM_WORLD.Get_size();
 538
 539     if (nProcessors > nGlobalMols) {
 540       sprintf(painCave.errMsg,

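The change at line 537 replaces the MPI C call with its C++-binding equivalent; both report the size of MPI_COMM_WORLD. A minimal, illustrative comparison (note that the MPI C++ bindings were deprecated in MPI-2.2 and removed in MPI-3, so this snippet only builds against an MPI library that still ships them):

    // Illustrative only: both queries return the same communicator size.
    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);

      int sizeC = 0;
      MPI_Comm_size(MPI_COMM_WORLD, &sizeC);      // C binding (old line 537)
      int sizeCpp = MPI::COMM_WORLD.Get_size();   // C++ binding (new line 537)
      // sizeC == sizeCpp on every rank.

      MPI::Finalize();
      return 0;
    }
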
 572     nTarget = (int)(precast + 0.5);
 573
 574     for(i = 0; i < nGlobalMols; i++) {
 575  +
 576       done = 0;
 577       loops = 0;
 578

 597     // and be done with it.
 598
 599     if (loops > 100) {
 600  +
 601       sprintf(painCave.errMsg,
 602  <            "I've tried 100 times to assign molecule %d to a "
 603  <            " processor, but can't find a good spot.\n"
 604  <            "I'm assigning it at random to processor %d.\n",
 602  >            "There have been 100 attempts to assign molecule %d to an\n"
 603  >            "\tunderworked processor, but there's no good place to\n"
 604  >            "\tleave it. OpenMD is assigning it at random to processor %d.\n",
 605               i, which_proc);
 606  <
 606  >
 607       painCave.isFatal = 0;
 608  +    painCave.severity = OPENMD_INFO;
 609       simError();
 610
 611       molToProcMap[i] = which_proc;

 650     }
 651
 652     delete myRandom;
 653  <
 653  >
 654     // Spray out this nonsense to all other processors:
 655  <
 625  <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 655  >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 656     } else {
 657
 658     // Listen to your marching orders from processor 0:
 659  <
 660  <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 659  >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 660  >
 661     }
 662
 663     info->setMolToProcMap(molToProcMap);

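The broadcast above follows the usual collective pattern: every rank, root and non-root alike, issues the same Bcast with the same root argument; rank 0 supplies molToProcMap and all other ranks receive it into their own copy. A self-contained sketch of that pattern (stand-in values, not OpenMD code):

    // Sketch of the broadcast-from-master idiom used above.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      const int masterNode = 0;
      const int worldRank  = MPI::COMM_WORLD.Get_rank();

      const int nGlobalMols = 8;                      // stand-in value
      std::vector<int> molToProcMap(nGlobalMols, -1);

      if (worldRank == masterNode) {
        // Only the master decides the layout...
        for (int i = 0; i < nGlobalMols; ++i)
          molToProcMap[i] = i % MPI::COMM_WORLD.Get_size();
      }
      // ...but every rank makes the identical collective call;
      // non-root ranks receive the table into their buffer.
      MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, masterNode);

      MPI::Finalize();
      return 0;
    }
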
 815     int beginRigidBodyIndex;
 816     int beginCutoffGroupIndex;
 817     int nGlobalAtoms = info->getNGlobalAtoms();
 818  +  int nGlobalRigidBodies = info->getNGlobalRigidBodies();
 819
 820     beginAtomIndex = 0;
 821     //rigidbody's index begins right after atom's

 882     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
 883     // docs said we could.
 884     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
 885  <  MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
 886  <                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 885  >  MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
 886  >                            &tmpGroupMembership[0], nGlobalAtoms,
 887  >                            MPI::INT, MPI::SUM);
 888     info->setGlobalGroupMembership(tmpGroupMembership);
 889     #else
 890     info->setGlobalGroupMembership(globalGroupMembership);
 891     #endif
 892
 893     //fill molMembership
 894  <  std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
 894  >  std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
 895  >                                       info->getNGlobalRigidBodies(), 0);
 896
 897  <  for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 897  >  for(mol = info->beginMolecule(mi); mol != NULL;
 898  >      mol = info->nextMolecule(mi)) {
 899       for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
 900         globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
 901       }
 902  +    for (rb = mol->beginRigidBody(ri); rb != NULL;
 903  +         rb = mol->nextRigidBody(ri)) {
 904  +      globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
 905  +    }
 906     }
 907
 908     #ifdef IS_MPI
 909  <  std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
 909  >  std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
 910  >                                    info->getNGlobalRigidBodies(), 0);
 911  >  MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
 912  >                            nGlobalAtoms + nGlobalRigidBodies,
 913  >                            MPI::INT, MPI::SUM);
 914
 873  -  MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
 874  -                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 875  -
 915     info->setGlobalMolMembership(tmpMolMembership);
 916     #else
 917     info->setGlobalMolMembership(globalMolMembership);

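The Allreduce calls in this block all rely on the same merge idiom: each rank fills in only the entries of the global table that belong to its own molecules, leaves everything else at zero, and an element-wise MPI::SUM then reassembles the complete table on every rank. This is also why globalMolMembership is now sized to nGlobalAtoms + nGlobalRigidBodies, so rigid-body global indices get slots of their own. A small, self-contained illustration of the idiom (stand-in sizes and values, independent of OpenMD's data structures):

    // Sketch of the fill-locally-then-SUM merge used above.
    #include <mpi.h>
    #include <vector>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      const int rank = MPI::COMM_WORLD.Get_rank();
      const int size = MPI::COMM_WORLD.Get_size();

      const int globalSize = 12;     // stand-in for nGlobalAtoms + nGlobalRigidBodies
      std::vector<int> local(globalSize, 0);
      std::vector<int> merged(globalSize, 0);

      // Each rank fills only the entries it owns (round-robin here for illustration).
      for (int k = rank; k < globalSize; k += size)
        local[k] = 100 + k;

      // Because unowned slots are 0 everywhere, an element-wise SUM
      // reproduces the full table on all ranks.
      MPI::COMM_WORLD.Allreduce(&local[0], &merged[0], globalSize,
                                MPI::INT, MPI::SUM);

      MPI::Finalize();
      return 0;
    }
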
 921     // here the molecules are listed by their global indices.
 922
 923     std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
 924  <  for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 924  >  for (mol = info->beginMolecule(mi); mol != NULL;
 925  >       mol = info->nextMolecule(mi)) {
 926       nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
 927     }
 928
 929     #ifdef IS_MPI
 930     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
 931  <  MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 932  <                info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 931  >  MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 932  >                            info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 933     #else
 934     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 935     #endif

 943     }
 944
 945     std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
 946  <  for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 946  >  for (mol = info->beginMolecule(mi); mol != NULL;
 947  >       mol = info->nextMolecule(mi)) {
 948       int myGlobalIndex = mol->getGlobalIndex();
 949       int globalIO = startingIOIndexForMol[myGlobalIndex];
 950       for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;