 35 |   | *
 36 |   | * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 37 |   | * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 38 | < | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 38 | > | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
 39 |   | * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
 40 |   | * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
 41 |   | */
 99 |   | #ifdef IS_MPI
100 |   | int streamSize;
101 |   | const int masterNode = 0;
102 | < | int commStatus;
102 | > |
103 |   | if (worldRank == masterNode) {
104 | < | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
104 | > | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
105 |   | #endif
106 |   | SimplePreprocessor preprocessor;
107 |   | preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
109 |   | #ifdef IS_MPI
110 |   | //brocasting the stream size
111 |   | streamSize = ppStream.str().size() +1;
112 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
113 | < |
114 | < | commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
115 | < |
116 | < |
112 | > | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
113 | > | MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
114 | > |
115 |   | } else {
116 | + | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
117 |   |
119 | - | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
120 | - |
118 |   | //get stream size
119 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
119 | > | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
120 |   |
121 |   | char* buf = new char[streamSize];
122 |   | assert(buf);
123 |   |
124 |   | //receive file content
125 | < | commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
125 | > | MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
126 |   |
127 |   | ppStream.str(buf);
128 |   | delete [] buf;
132 | - |
129 |   | }
130 |   | #endif
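
The hunks above replace the C-style MPI_Bcast calls (whose return value was stored in an unused commStatus) with the MPI-2 C++ bindings: the master rank preprocesses the metadata, then broadcasts the file version, the stream size, and the stream contents, and every other rank receives them in the same order. A minimal standalone sketch of that pattern, using illustrative names that are not OpenMD's and assuming MPI::Init() has already been called:

#include <mpi.h>
#include <string>
#include <vector>

// Illustrative helper (not OpenMD code): rank `masterNode` owns the
// preprocessed metadata; every rank returns holding an identical copy.
std::string broadcastStream(const std::string& masterCopy, int masterNode = 0) {
  int streamSize;

  if (MPI::COMM_WORLD.Get_rank() == masterNode) {
    // Master announces the size (including the trailing '\0'), then the bytes.
    streamSize = static_cast<int>(masterCopy.size()) + 1;
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    MPI::COMM_WORLD.Bcast(const_cast<char*>(masterCopy.c_str()),
                          streamSize, MPI::CHAR, masterNode);
    return masterCopy;
  } else {
    // Workers learn the size first, then receive the characters.
    MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);
    std::vector<char> buf(streamSize);
    MPI::COMM_WORLD.Bcast(&buf[0], streamSize, MPI::CHAR, masterNode);
    return std::string(&buf[0]);
  }
}

The sketch broadcasts the size with MPI::INT so the datatype matches the int variable, whereas the patched code keeps MPI::LONG for an int streamSize. Note also that these C++ bindings were deprecated in MPI-2.2 and removed in MPI-3, so on newer MPI libraries the same pattern maps back onto MPI_Bcast.
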
131 |   | // Create a scanner that reads from the input stream
516 |   | RealType x;
517 |   | RealType y;
518 |   | RealType a;
523 | - | int old_atoms;
524 | - | int add_atoms;
525 | - | int new_atoms;
526 | - | int nTarget;
527 | - | int done;
528 | - | int i;
529 | - | int loops;
530 | - | int which_proc;
519 |   | int nProcessors;
520 |   | std::vector<int> atomsPerProc;
521 |   | int nGlobalMols = info->getNGlobalMolecules();
522 | < | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
522 | > | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an
523 | > | // error
524 | > | // condition:
525 |   |
526 |   | nProcessors = MPI::COMM_WORLD.Get_size();
527 |   |
532 |   | "\tthe number of molecules. This will not result in a \n"
533 |   | "\tusable division of atoms for force decomposition.\n"
534 |   | "\tEither try a smaller number of processors, or run the\n"
535 | < | "\tsingle-processor version of OpenMD.\n", nProcessors, nGlobalMols);
535 | > | "\tsingle-processor version of OpenMD.\n", nProcessors,
536 | > | nGlobalMols);
537 |   |
538 |   | painCave.isFatal = 1;
539 |   | simError();
540 |   | }
541 |   |
551 | - | int seedValue;
542 |   | Globals * simParams = info->getSimParams();
543 | < | SeqRandNumGen* myRandom; //divide labor does not need Parallel random number generator
543 | > | SeqRandNumGen* myRandom; //divide labor does not need Parallel
544 | > | //random number generator
545 |   | if (simParams->haveSeed()) {
546 | < | seedValue = simParams->getSeed();
546 | > | int seedValue = simParams->getSeed();
547 |   | myRandom = new SeqRandNumGen(seedValue);
548 |   | }else {
549 |   | myRandom = new SeqRandNumGen();
559 |   | numerator = info->getNGlobalAtoms();
560 |   | denominator = nProcessors;
561 |   | precast = numerator / denominator;
562 | < | nTarget = (int)(precast + 0.5);
562 | > | int nTarget = (int)(precast + 0.5);
563 |   |
564 | < | for(i = 0; i < nGlobalMols; i++) {
564 | > | for(int i = 0; i < nGlobalMols; i++) {
565 |   |
566 | < | done = 0;
567 | < | loops = 0;
566 | > | int done = 0;
567 | > | int loops = 0;
568 |   |
569 |   | while (!done) {
570 |   | loops++;
571 |   |
572 |   | // Pick a processor at random
573 |   |
574 | < | which_proc = (int) (myRandom->rand() * nProcessors);
574 | > | int which_proc = (int) (myRandom->rand() * nProcessors);
575 |   |
576 |   | //get the molecule stamp first
577 |   | int stampId = info->getMoleculeStampId(i);
578 |   | MoleculeStamp * moleculeStamp = info->getMoleculeStamp(stampId);
579 |   |
580 |   | // How many atoms does this processor have so far?
581 | < | old_atoms = atomsPerProc[which_proc];
582 | < | add_atoms = moleculeStamp->getNAtoms();
583 | < | new_atoms = old_atoms + add_atoms;
581 | > | int old_atoms = atomsPerProc[which_proc];
582 | > | int add_atoms = moleculeStamp->getNAtoms();
583 | > | int new_atoms = old_atoms + add_atoms;
584 |   |
585 |   | // If we've been through this loop too many times, we need
586 |   | // to just give up and assign the molecule to this processor
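
These hunks move the divide-labor variables (seedValue, nTarget, i, done, loops, which_proc, old_atoms, add_atoms, new_atoms) from function-scope declarations to declarations at first use. The surrounding algorithm: compute a per-processor atom target, then for each molecule repeatedly pick a processor at random and check what its atom count would become, giving up and assigning the molecule anyway after too many retries. A simplified, self-contained sketch of that loop follows; it is not OpenMD code, nAtomsPerMol and rand01 stand in for the MoleculeStamp lookup and SeqRandNumGen, and the probabilistic acceptance test that follows line 586 in the real file is replaced by a plain "accept if at or under target" rule.

#include <vector>

// Simplified stand-in for the division-of-labor loop above.
std::vector<int> divideMolecules(const std::vector<int>& nAtomsPerMol,
                                 int nProcessors,
                                 double (*rand01)()) {
  int nGlobalMols  = static_cast<int>(nAtomsPerMol.size());
  int nGlobalAtoms = 0;
  for (int i = 0; i < nGlobalMols; i++) nGlobalAtoms += nAtomsPerMol[i];

  // Same target as the diff: average atoms per processor, rounded to nearest.
  int nTarget = (int)((double)nGlobalAtoms / (double)nProcessors + 0.5);

  std::vector<int> molToProcMap(nGlobalMols, -1);   // -1 flags "unassigned"
  std::vector<int> atomsPerProc(nProcessors, 0);

  for (int i = 0; i < nGlobalMols; i++) {
    int done  = 0;
    int loops = 0;
    while (!done) {
      loops++;
      int which_proc = (int)(rand01() * nProcessors);  // pick a processor at random
      int old_atoms  = atomsPerProc[which_proc];
      int new_atoms  = old_atoms + nAtomsPerMol[i];

      // Accept if the processor stays at or under target, or if we have
      // retried too many times and must just assign the molecule somewhere.
      if (new_atoms <= nTarget || loops > 100) {
        molToProcMap[i]          = which_proc;
        atomsPerProc[which_proc] = new_atoms;
        done = 1;
      }
    }
  }
  return molToProcMap;
}

Initializing molToProcMap to -1, as the real code does, makes any molecule that somehow escapes assignment easy to detect afterwards.
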
786 |   | }
787 |   | }
788 |   |
789 | < | if (simParams->getOutputElectricField()) {
789 | > | if (simParams->getOutputElectricField() | simParams->haveElectricField()) {
790 |   | storageLayout |= DataStorage::dslElectricField;
791 |   | }
792 |   |
796 |   | storageLayout |= DataStorage::dslFlucQForce;
797 |   | }
798 |   |
799 | + | info->setStorageLayout(storageLayout);
800 | + |
801 |   | return storageLayout;
802 |   | }
803 |   |
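
The final hunks OR additional DataStorage::dsl* capability bits into storageLayout (the electric-field test now also checks haveElectricField()) and, new in this change, record the result via info->setStorageLayout(storageLayout) before returning it. A small sketch of the bit-flag pattern, using made-up flag values rather than OpenMD's DataStorage constants:

#include <iostream>

// Illustrative bit-flag layout: each capability occupies one bit so that a
// single int can describe which per-atom arrays need to be allocated.
enum StorageBits {
  dslPosition      = 1 << 0,
  dslVelocity      = 1 << 1,
  dslForce         = 1 << 2,
  dslElectricField = 1 << 3,
  dslFlucQForce    = 1 << 4
};

int computeStorageLayout(bool outputElectricField, bool doFlucQ) {
  int storageLayout = dslPosition | dslVelocity | dslForce;

  // Mirror of the pattern in the diff: OR in optional capabilities.
  if (outputElectricField) storageLayout |= dslElectricField;
  if (doFlucQ)             storageLayout |= dslFlucQForce;

  return storageLayout;
}

int main() {
  int layout = computeStorageLayout(true, false);
  // Consumers test for a capability the same way: with bitwise AND.
  std::cout << "electric field stored? "
            << ((layout & dslElectricField) != 0) << "\n";
  return 0;
}
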