| 1 |   /*
| 2 | < * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
| 2 | > * copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
| 3 |   *
| 4 |   * The University of Notre Dame grants you ("Licensee") a
| 5 |   * non-exclusive, royalty free, license to use, modify and
| 100 |   #ifdef IS_MPI
| 101 |   int streamSize;
| 102 |   const int masterNode = 0;
| 103 | < int commStatus;
| 103 | >
| 104 |   if (worldRank == masterNode) {
| 105 | < commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 105 | > MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 106 |   #endif
| 107 |   SimplePreprocessor preprocessor;
| 108 | < preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
| 108 | > preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock,
| 109 | >                         ppStream);
| 110 |
| 111 |   #ifdef IS_MPI
| 112 |   //brocasting the stream size
| 113 |   streamSize = ppStream.str().size() +1;
| 114 | < commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
| 115 | <
| 116 | < commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 116 | <
| 114 | > MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 115 | > MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
| 116 | >                       streamSize, MPI::CHAR, masterNode);
| 117 |
| 118 |   } else {
| 119 |
| 120 | < commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 120 | > MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 121 |
| 122 |   //get stream size
| 123 | < commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
| 123 | > MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 124 |
| 125 |   char* buf = new char[streamSize];
| 126 |   assert(buf);
| 127 |
| 128 |   //receive file content
| 129 | < commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 129 | > MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
| 130 |
| 131 |   ppStream.str(buf);
| 132 |   delete [] buf;
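
The hunk above replaces the C-style MPI_Bcast calls (and the now-unused commStatus return value) with the MPI-2 C++ bindings: the master rank preprocesses the metadata, then broadcasts the stream length followed by the characters, and every other rank receives both. Below is a minimal standalone sketch of that master/worker broadcast pattern. It is not OpenMD code; it assumes an MPI library that still ships the C++ bindings (they were deprecated in MPI-2.2 and removed in MPI-3), and it broadcasts streamSize as MPI::INT so the datatype matches the int declaration, whereas the hunk above pairs an int with MPI::LONG.

// Sketch: rank 0 broadcasts a string's length, then its characters, to all ranks.
#include <mpi.h>
#include <iostream>
#include <sstream>
#include <string>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  const int masterNode = 0;
  int rank = MPI::COMM_WORLD.Get_rank();

  std::stringstream ppStream;   // stands in for the preprocessed metadata stream
  int streamSize;

  if (rank == masterNode) {
    ppStream << "<OpenMD version=2>";                          // placeholder content
    streamSize = static_cast<int>(ppStream.str().size()) + 1;  // +1 for the trailing NUL
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    MPI::COMM_WORLD.Bcast(const_cast<char*>(ppStream.str().c_str()),
                          streamSize, MPI::CHAR, masterNode);
  } else {
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    char* buf = new char[streamSize];
    MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
    ppStream.str(buf);          // every rank now holds the same text
    delete [] buf;
  }

  std::cout << "rank " << rank << ": " << ppStream.str() << std::endl;
  MPI::Finalize();
  return 0;
}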
| 256 |   int metaDataBlockStart = -1;
| 257 |   int metaDataBlockEnd = -1;
| 258 |   int i;
| 259 | < streamoff mdOffset;
| 259 | > streamoff mdOffset(0);
| 260 |   int mdFileVersion;
| 261 |
| 262 | +
| 263 |   #ifdef IS_MPI
| 264 |   const int masterNode = 0;
| 265 |   if (worldRank == masterNode) {
| 266 |   #endif
| 267 |
| 268 | < std::ifstream mdFile_(mdFileName.c_str());
| 268 | > std::ifstream mdFile_;
| 269 | > mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
| 270 |
| 271 |   if (mdFile_.fail()) {
| 272 |   sprintf(painCave.errMsg,
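
This hunk initializes mdOffset and opens the metadata file explicitly in binary mode, presumably so that stream offsets recorded with tellg() stay byte-accurate even on platforms that translate line endings in text mode. A small sketch of that idea follows; it is not OpenMD code, and "sample.omd" is only a placeholder filename.

// Sketch: open in binary mode so tellg()/seekg() offsets map directly to file bytes.
#include <fstream>
#include <iostream>
#include <string>

int main() {
  std::ifstream mdFile_;
  mdFile_.open("sample.omd", std::ifstream::in | std::ifstream::binary);
  if (mdFile_.fail()) {
    std::cerr << "Cannot open sample.omd\n";
    return 1;
  }
  std::string line;
  std::getline(mdFile_, line);               // read the first line
  std::streamoff mdOffset(mdFile_.tellg());  // byte-accurate position after that line
  std::cout << "offset after first line: " << mdOffset << "\n";
  return 0;
}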
| 499 |   int nTarget;
| 500 |   int done;
| 501 |   int i;
| 500 | – int j;
| 502 |   int loops;
| 503 |   int which_proc;
| 504 |   int nProcessors;
| 506 |   int nGlobalMols = info->getNGlobalMolecules();
| 507 |   std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
| 508 |
| 509 | < MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
| 509 | > nProcessors = MPI::COMM_WORLD.Get_size();
| 510 |
| 511 |   if (nProcessors > nGlobalMols) {
| 512 |   sprintf(painCave.errMsg,
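
Here the out-parameter call MPI_Comm_size becomes a plain return value from the C++ bindings. A trivial sketch of that query style, again not OpenMD code and again assuming the pre-MPI-3 C++ bindings:

// Sketch: communicator size and rank as return values instead of out-parameters.
#include <mpi.h>
#include <iostream>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  int nProcessors = MPI::COMM_WORLD.Get_size();  // replaces MPI_Comm_size
  int worldRank   = MPI::COMM_WORLD.Get_rank();  // replaces MPI_Comm_rank
  std::cout << "rank " << worldRank << " of " << nProcessors << std::endl;
  MPI::Finalize();
  return 0;
}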
| 544 |   nTarget = (int)(precast + 0.5);
| 545 |
| 546 |   for(i = 0; i < nGlobalMols; i++) {
| 547 | +
| 548 |   done = 0;
| 549 |   loops = 0;
| 550 |
| 569 |   // and be done with it.
| 570 |
| 571 |   if (loops > 100) {
| 572 | +
| 573 |   sprintf(painCave.errMsg,
| 574 | < "I've tried 100 times to assign molecule %d to a "
| 575 | < " processor, but can't find a good spot.\n"
| 576 | < "I'm assigning it at random to processor %d.\n",
| 574 | > "There have been 100 attempts to assign molecule %d to an\n"
| 575 | > "\tunderworked processor, but there's no good place to\n"
| 576 | > "\tleave it. OpenMD is assigning it at random to processor %d.\n",
| 577 |   i, which_proc);
| 578 | <
| 578 | >
| 579 |   painCave.isFatal = 0;
| 580 | + painCave.severity = OPENMD_INFO;
| 581 |   simError();
| 582 |
| 583 |   molToProcMap[i] = which_proc;
| 622 |   }
| 623 |
| 624 |   delete myRandom;
| 625 | <
| 625 | >
| 626 |   // Spray out this nonsense to all other processors:
| 627 | <
| 624 | < MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 627 | > MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 628 |   } else {
| 629 |
| 630 |   // Listen to your marching orders from processor 0:
| 631 | <
| 632 | < MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 631 | > MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 632 | >
| 633 |   }
| 634 |
| 635 |   info->setMolToProcMap(molToProcMap);
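
The map built on rank 0 is sent to everyone by handing &molToProcMap[0], the vector's contiguous storage, to Bcast. Because Bcast is collective, every rank has to make the identical call, which is why it appears in both branches above. A compact sketch of that pattern (not OpenMD code, with a toy assignment in place of the real load balancing):

// Sketch: broadcasting a std::vector<int> filled on rank 0 to all ranks.
#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  int nGlobalMols = 4;                                   // placeholder size
  std::vector<int> molToProcMap(nGlobalMols, -1);        // -1 flags "unassigned"
  if (MPI::COMM_WORLD.Get_rank() == 0)
    for (int i = 0; i < nGlobalMols; i++)
      molToProcMap[i] = i % MPI::COMM_WORLD.Get_size();  // toy round-robin assignment
  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
  MPI::Finalize();
  return 0;
}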
| 787 |   int beginRigidBodyIndex;
| 788 |   int beginCutoffGroupIndex;
| 789 |   int nGlobalAtoms = info->getNGlobalAtoms();
| 790 | + int nGlobalRigidBodies = info->getNGlobalRigidBodies();
| 791 |
| 792 |   beginAtomIndex = 0;
| 793 |   //rigidbody's index begins right after atom's
| 854 |   // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
| 855 |   // docs said we could.
| 856 |   std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
| 857 | < MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
| 858 | <               MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 857 | > MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
| 858 | >                           &tmpGroupMembership[0], nGlobalAtoms,
| 859 | >                           MPI::INT, MPI::SUM);
| 860 |   info->setGlobalGroupMembership(tmpGroupMembership);
| 861 |   #else
| 862 |   info->setGlobalGroupMembership(globalGroupMembership);
| 863 |   #endif
| 864 |
| 865 |   //fill molMembership
| 866 | < std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
| 866 | > std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
| 867 | >                                      info->getNGlobalRigidBodies(), 0);
| 868 |
| 869 | < for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
| 869 | > for(mol = info->beginMolecule(mi); mol != NULL;
| 870 | >     mol = info->nextMolecule(mi)) {
| 871 |   for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
| 872 |   globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
| 873 |   }
| 874 | + for (rb = mol->beginRigidBody(ri); rb != NULL;
| 875 | +      rb = mol->nextRigidBody(ri)) {
| 876 | +   globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
| 877 | + }
| 878 |   }
| 879 |
| 880 |   #ifdef IS_MPI
| 881 | < std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
| 881 | > std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
| 882 | >                                   info->getNGlobalRigidBodies(), 0);
| 883 | > MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 884 | >                           nGlobalAtoms + nGlobalRigidBodies,
| 885 | >                           MPI::INT, MPI::SUM);
| 886 |
| 872 | – MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
| 873 | –               MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 874 | –
| 887 |   info->setGlobalMolMembership(tmpMolMembership);
| 888 |   #else
| 889 |   info->setGlobalMolMembership(globalMolMembership);
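
The membership tables above are assembled with an Allreduce sum: every rank fills only the entries for the atoms (and now rigid bodies) it owns, leaves the rest at zero, and the element-wise sum across ranks gives every rank the complete table. A separate receive buffer is used, matching the comment above about not relying on MPI_IN_PLACE. A minimal sketch of that pattern, not OpenMD code and with made-up sizes and values:

// Sketch: each rank contributes its "owned" entries; Allreduce with SUM
// assembles the full table on every rank.
#include <mpi.h>
#include <iostream>
#include <vector>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  int rank = MPI::COMM_WORLD.Get_rank();
  int size = MPI::COMM_WORLD.Get_size();

  const int nGlobal = 8;                        // pretend there are 8 atoms
  std::vector<int> local(nGlobal, 0), global(nGlobal, 0);
  for (int i = rank; i < nGlobal; i += size)    // entries owned by this rank
    local[i] = 100 + i;                         // placeholder membership value

  MPI::COMM_WORLD.Allreduce(&local[0], &global[0], nGlobal, MPI::INT, MPI::SUM);

  if (rank == 0)
    for (int i = 0; i < nGlobal; ++i)
      std::cout << "entry " << i << " -> " << global[i] << std::endl;

  MPI::Finalize();
  return 0;
}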
| 893 |   // here the molecules are listed by their global indices.
| 894 |
| 895 |   std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
| 896 | < for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
| 896 | > for (mol = info->beginMolecule(mi); mol != NULL;
| 897 | >      mol = info->nextMolecule(mi)) {
| 898 |   nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
| 899 |   }
| 900 |
| 901 |   #ifdef IS_MPI
| 902 |   std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
| 903 | < MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 904 | <               info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 903 | > MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 904 | >                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
| 905 |   #else
| 906 |   std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
| 907 |   #endif
| 915 |   }
| 916 |
| 917 |   std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
| 918 | < for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
| 918 | > for (mol = info->beginMolecule(mi); mol != NULL;
| 919 | >      mol = info->nextMolecule(mi)) {
| 920 |   int myGlobalIndex = mol->getGlobalIndex();
| 921 |   int globalIO = startingIOIndexForMol[myGlobalIndex];
| 922 |   for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
| 932 |   }
| 933 |
| 934 |   void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
| 921 | – Globals* simParams;
| 935 |
| 923 | – simParams = info->getSimParams();
| 924 | –
| 936 |   DumpReader reader(info, mdFileName);
| 937 |   int nframes = reader.getNFrames();
| 938 |