| 35 |   | * |
| 36 |   | * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). |
| 37 |   | * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). |
| 38 | < | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008). |
| 38 | > | * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008). |
| 39 |   | * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010). |
| 40 |   | * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). |
| 41 |   | */ |
| 44 |   | * @file SimCreator.cpp |
| 45 |   | * @author tlin |
| 46 |   | * @date 11/03/2004 |
| 47 | - | * @time 13:51am |
| 47 |   | * @version 1.0 |
| 48 |   | */ |
| 49 |   | #include <exception> |
| 55 |   | #include "brains/SimCreator.hpp" |
| 56 |   | #include "brains/SimSnapshotManager.hpp" |
| 57 |   | #include "io/DumpReader.hpp" |
| 58 | < | #include "UseTheForce/ForceFieldFactory.hpp" |
| 58 | > | #include "brains/ForceField.hpp" |
| 59 |   | #include "utils/simError.h" |
| 60 |   | #include "utils/StringUtils.hpp" |
| 61 |   | #include "math/SeqRandNumGen.hpp" |
| 99 |    | #ifdef IS_MPI |
| 100 |   | int streamSize; |
| 101 |   | const int masterNode = 0; |
| 102 | < | int commStatus; |
| 102 | > | |
| 103 |   | if (worldRank == masterNode) { |
| 104 | < | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 104 | > | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
| 105 |   | #endif |
| 106 |   | SimplePreprocessor preprocessor; |
| 107 |   | preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream); |
| 109 |   | #ifdef IS_MPI |
| 110 |   | //brocasting the stream size |
| 111 |   | streamSize = ppStream.str().size() +1; |
| 112 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD); |
| 113 | < | |
| 114 | < | commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
| 116 | < | |
| 117 | < | |
| 112 | > | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
| 113 | > | MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode); |
| 114 | > | |
| 115 |   | } else { |
| 116 | + | MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
| 117 |   | |
| 120 | - | commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 121 | - | |
| 118 |   | //get stream size |
| 119 | < | commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD); |
| 119 | > | MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
| 120 |   | |
| 121 |   | char* buf = new char[streamSize]; |
| 122 |   | assert(buf); |
| 123 |   | |
| 124 |   | //receive file content |
| 125 | < | commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
| 125 | > | MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); |
| 126 |   | |
| 127 |   | ppStream.str(buf); |
| 128 |   | delete [] buf; |
| 133 | - | |
| 129 |   | } |
| 130 |   | #endif |
| 131 |   | // Create a scanner that reads from the input stream |
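The replacement lines in this hunk move the metadata broadcast from the C `MPI_Bcast` calls to the MPI-2 C++ bindings (`MPI::COMM_WORLD.Bcast`), sending the preprocessed stream as a length followed by its characters. For reference only, a minimal sketch of the same pattern for a `std::string`; this is not OpenMD code, the function name is illustrative, and it assumes `MPI::Init` has already been called (the C++ bindings shown here were later deprecated in MPI-3):

```cpp
#include <mpi.h>
#include <string>
#include <vector>

// Broadcast a string from masterNode to every rank: length first, then bytes.
std::string broadcastString(const std::string& src, int masterNode) {
  int rank = MPI::COMM_WORLD.Get_rank();

  // Length of the buffer, including the trailing '\0'.
  int streamSize = (rank == masterNode) ? static_cast<int>(src.size()) + 1 : 0;
  MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);

  // Zero-filled buffer; the master copies its characters in before sending.
  std::vector<char> buf(streamSize, '\0');
  if (rank == masterNode) src.copy(&buf[0], src.size());
  MPI::COMM_WORLD.Bcast(&buf[0], streamSize, MPI::CHAR, masterNode);

  return std::string(&buf[0]);
}
```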
| 250 |   | std::string mdRawData; |
| 251 |   | int metaDataBlockStart = -1; |
| 252 |   | int metaDataBlockEnd = -1; |
| 253 | < | int i; |
| 254 | < | int mdOffset; |
| 253 | > | int i, j; |
| 254 | > | streamoff mdOffset; |
| 255 |   | int mdFileVersion; |
| 256 |   | |
| 257 | + | // Create a string for embedding the version information in the MetaData |
| 258 | + | std::string version; |
| 259 | + | version.assign("## Last run using OpenMD Version: "); |
| 260 | + | version.append(OPENMD_VERSION_MAJOR); |
| 261 | + | version.append("."); |
| 262 | + | version.append(OPENMD_VERSION_MINOR); |
| 263 | + | |
| 264 | + | std::string svnrev; |
| 265 | + | //convert a macro from compiler to a string in c++ |
| 266 | + | STR_DEFINE(svnrev, SVN_REV ); |
| 267 | + | version.append(" Revision: "); |
| 268 | + | // If there's no SVN revision, just call this the RELEASE revision. |
| 269 | + | if (!svnrev.empty()) { |
| 270 | + | version.append(svnrev); |
| 271 | + | } else { |
| 272 | + | version.append("RELEASE"); |
| 273 | + | } |
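The new block above builds the version string from compiler-supplied macros; `STR_DEFINE` is OpenMD's own helper for turning the `SVN_REV` macro into a C++ string. As a point of reference, the usual two-level preprocessor stringification idiom that this kind of helper relies on looks like the following (the macro names here are illustrative, not OpenMD's):

```cpp
// Two-level expansion: the outer macro forces SVN_REV to expand to its value
// (e.g. 1957) before the inner macro turns it into the literal "1957".
#define STRINGIFY(x) #x
#define EXPAND_AND_STRINGIFY(x) STRINGIFY(x)

// e.g. compiled with -DSVN_REV=1957:
// std::string svnrev = EXPAND_AND_STRINGIFY(SVN_REV);  // yields "1957"
```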
| 274 | + | |
| 275 |   | #ifdef IS_MPI |
| 276 |   | const int masterNode = 0; |
| 277 |   | if (worldRank == masterNode) { |
| 278 |   | #endif |
| 279 |   | |
| 280 | < | std::ifstream mdFile_(mdFileName.c_str()); |
| 280 | > | std::ifstream mdFile_; |
| 281 | > | mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary); |
| 282 |   | |
| 283 |   | if (mdFile_.fail()) { |
| 284 |   | sprintf(painCave.errMsg, |
| 366 |   | |
| 367 |   | mdRawData.clear(); |
| 368 |   | |
| 369 | + | bool foundVersion = false; |
| 370 | + | |
| 371 |   | for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) { |
| 372 |   | mdFile_.getline(buffer, bufferSize); |
| 373 | < | mdRawData += buffer; |
| 373 | > | std::string line = trimLeftCopy(buffer); |
| 374 | > | j = CaseInsensitiveFind(line, "## Last run using OpenMD Version"); |
| 375 | > | if (static_cast<size_t>(j) != string::npos) { |
| 376 | > | foundVersion = true; |
| 377 | > | mdRawData += version; |
| 378 | > | } else { |
| 379 | > | mdRawData += buffer; |
| 380 | > | } |
| 381 |   | mdRawData += "\n"; |
| 382 |   | } |
| 383 | < | |
| 383 | > | |
| 384 | > | if (!foundVersion) mdRawData += version + "\n"; |
| 385 | > | |
| 386 |   | mdFile_.close(); |
| 387 |   | |
| 388 |   | #ifdef IS_MPI |
| 396 |   | metaDataBlockStart + 1); |
| 397 |   | |
| 398 |   | //create the force field |
| 399 | < | ForceField * ff = ForceFieldFactory::getInstance()->createForceField(simParams->getForceField()); |
| 399 | > | ForceField * ff = new ForceField(simParams->getForceField()); |
| 400 |   | |
| 401 |   | if (ff == NULL) { |
| 402 |   | sprintf(painCave.errMsg, |
| 516 |   | RealType x; |
| 517 |   | RealType y; |
| 518 |   | RealType a; |
| 494 | - | int old_atoms; |
| 495 | - | int add_atoms; |
| 496 | - | int new_atoms; |
| 497 | - | int nTarget; |
| 498 | - | int done; |
| 499 | - | int i; |
| 500 | - | int j; |
| 501 | - | int loops; |
| 502 | - | int which_proc; |
| 519 |   | int nProcessors; |
| 520 |   | std::vector<int> atomsPerProc; |
| 521 |   | int nGlobalMols = info->getNGlobalMolecules(); |
| 522 | < | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition: |
| 522 | > | std::vector<int> molToProcMap(nGlobalMols, -1); // default to an |
| 523 | > | // error |
| 524 | > | // condition: |
| 525 |   | |
| 526 | < | MPI_Comm_size(MPI_COMM_WORLD, &nProcessors); |
| 526 | > | nProcessors = MPI::COMM_WORLD.Get_size(); |
| 527 |   | |
| 528 |   | if (nProcessors > nGlobalMols) { |
| 529 |   | sprintf(painCave.errMsg, |
| 532 |   | "\tthe number of molecules. This will not result in a \n" |
| 533 |   | "\tusable division of atoms for force decomposition.\n" |
| 534 |   | "\tEither try a smaller number of processors, or run the\n" |
| 535 | < | "\tsingle-processor version of OpenMD.\n", nProcessors, nGlobalMols); |
| 535 | > | "\tsingle-processor version of OpenMD.\n", nProcessors, |
| 536 | > | nGlobalMols); |
| 537 |   | |
| 538 |   | painCave.isFatal = 1; |
| 539 |   | simError(); |
| 540 |   | } |
| 541 |   | |
| 523 | - | int seedValue; |
| 542 |   | Globals * simParams = info->getSimParams(); |
| 543 | < | SeqRandNumGen* myRandom; //divide labor does not need Parallel random number generator |
| 543 | > | SeqRandNumGen* myRandom; //divide labor does not need Parallel |
| 544 | > | //random number generator |
| 545 |   | if (simParams->haveSeed()) { |
| 546 | < | seedValue = simParams->getSeed(); |
| 546 | > | int seedValue = simParams->getSeed(); |
| 547 |   | myRandom = new SeqRandNumGen(seedValue); |
| 548 |   | }else { |
| 549 |   | myRandom = new SeqRandNumGen(); |
| 559 |   | numerator = info->getNGlobalAtoms(); |
| 560 |   | denominator = nProcessors; |
| 561 |   | precast = numerator / denominator; |
| 562 | < | nTarget = (int)(precast + 0.5); |
| 562 | > | int nTarget = (int)(precast + 0.5); |
| 563 |   | |
| 564 | < | for(i = 0; i < nGlobalMols; i++) { |
| 565 | < | done = 0; |
| 566 | < | loops = 0; |
| 564 | > | for(int i = 0; i < nGlobalMols; i++) { |
| 565 | > | |
| 566 | > | int done = 0; |
| 567 | > | int loops = 0; |
| 568 |   | |
| 569 |   | while (!done) { |
| 570 |   | loops++; |
| 571 |   | |
| 572 |   | // Pick a processor at random |
| 573 |   | |
| 574 | < | which_proc = (int) (myRandom->rand() * nProcessors); |
| 574 | > | int which_proc = (int) (myRandom->rand() * nProcessors); |
| 575 |   | |
| 576 |   | //get the molecule stamp first |
| 577 |   | int stampId = info->getMoleculeStampId(i); |
| 578 |   | MoleculeStamp * moleculeStamp = info->getMoleculeStamp(stampId); |
| 579 |   | |
| 580 |   | // How many atoms does this processor have so far? |
| 581 | < | old_atoms = atomsPerProc[which_proc]; |
| 582 | < | add_atoms = moleculeStamp->getNAtoms(); |
| 583 | < | new_atoms = old_atoms + add_atoms; |
| 581 | > | int old_atoms = atomsPerProc[which_proc]; |
| 582 | > | int add_atoms = moleculeStamp->getNAtoms(); |
| 583 | > | int new_atoms = old_atoms + add_atoms; |
| 584 |   | |
| 585 |   | // If we've been through this loop too many times, we need |
| 586 |   | // to just give up and assign the molecule to this processor |
| 587 |   | // and be done with it. |
| 588 |   | |
| 589 |   | if (loops > 100) { |
| 590 | + | |
| 591 |   | sprintf(painCave.errMsg, |
| 592 | < | "I've tried 100 times to assign molecule %d to a " |
| 593 | < | " processor, but can't find a good spot.\n" |
| 594 | < | "I'm assigning it at random to processor %d.\n", |
| 592 | > | "There have been 100 attempts to assign molecule %d to an\n" |
| 593 | > | "\tunderworked processor, but there's no good place to\n" |
| 594 | > | "\tleave it. OpenMD is assigning it at random to processor %d.\n", |
| 595 |   | i, which_proc); |
| 596 | < | |
| 596 | > | |
| 597 |   | painCave.isFatal = 0; |
| 598 | + | painCave.severity = OPENMD_INFO; |
| 599 |   | simError(); |
| 600 |   | |
| 601 |   | molToProcMap[i] = which_proc; |
| 640 |   | } |
| 641 |   | |
| 642 |   | delete myRandom; |
| 643 | < | |
| 643 | > | |
| 644 |   | // Spray out this nonsense to all other processors: |
| 645 | < | |
| 624 | < | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 645 | > | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 646 |   | } else { |
| 647 |   | |
| 648 |   | // Listen to your marching orders from processor 0: |
| 649 | < | |
| 650 | < | MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 649 | > | MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 650 | > | |
| 651 |   | } |
| 652 |   | |
| 653 |   | info->setMolToProcMap(molToProcMap); |
| 694 |   | set<AtomType*>::iterator i; |
| 695 |   | bool hasDirectionalAtoms = false; |
| 696 |   | bool hasFixedCharge = false; |
| 697 | < | bool hasMultipoles = false; |
| 697 | > | bool hasDipoles = false; |
| 698 | > | bool hasQuadrupoles = false; |
| 699 |   | bool hasPolarizable = false; |
| 700 |   | bool hasFluctuatingCharge = false; |
| 701 |   | bool hasMetallic = false; |
| 717 |   | if (da.isDirectional()){ |
| 718 |   | hasDirectionalAtoms = true; |
| 719 |   | } |
| 720 | < | if (ma.isMultipole()){ |
| 721 | < | hasMultipoles = true; |
| 720 | > | if (ma.isDipole()){ |
| 721 | > | hasDipoles = true; |
| 722 |   | } |
| 723 | + | if (ma.isQuadrupole()){ |
| 724 | + | hasQuadrupoles = true; |
| 725 | + | } |
| 726 |   | if (ea.isEAM() || sca.isSuttonChen()){ |
| 727 |   | hasMetallic = true; |
| 728 |   | } |
| 746 |   | storageLayout |= DataStorage::dslTorque; |
| 747 |   | } |
| 748 |   | } |
| 749 | < | if (hasMultipoles) { |
| 750 | < | storageLayout |= DataStorage::dslElectroFrame; |
| 749 | > | if (hasDipoles) { |
| 750 | > | storageLayout |= DataStorage::dslDipole; |
| 751 |   | } |
| 752 | + | if (hasQuadrupoles) { |
| 753 | + | storageLayout |= DataStorage::dslQuadrupole; |
| 754 | + | } |
| 755 |   | if (hasFixedCharge || hasFluctuatingCharge) { |
| 756 |   | storageLayout |= DataStorage::dslSkippedCharge; |
| 757 |   | } |
| 779 |   | if (simParams->getOutputParticlePotential()) { |
| 780 |   | storageLayout |= DataStorage::dslParticlePot; |
| 781 |   | } |
| 782 | < | if (simParams->getOutputElectricField()) { |
| 782 | > | |
| 783 | > | if (simParams->havePrintHeatFlux()) { |
| 784 | > | if (simParams->getPrintHeatFlux()) { |
| 785 | > | storageLayout |= DataStorage::dslParticlePot; |
| 786 | > | } |
| 787 | > | } |
| 788 | > | |
| 789 | > | if (simParams->getOutputElectricField() | simParams->haveElectricField()) { |
| 790 |   | storageLayout |= DataStorage::dslElectricField; |
| 791 |   | } |
| 792 | + | |
| 793 |   | if (simParams->getOutputFluctuatingCharges()) { |
| 794 |   | storageLayout |= DataStorage::dslFlucQPosition; |
| 795 |   | storageLayout |= DataStorage::dslFlucQVelocity; |
| 796 |   | storageLayout |= DataStorage::dslFlucQForce; |
| 797 |   | } |
| 798 |   | |
| 799 | + | info->setStorageLayout(storageLayout); |
| 800 | + | |
| 801 |   | return storageLayout; |
| 802 |   | } |
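The storage layout assembled above is a bitmask: each `DataStorage::dsl*` flag selects one per-particle array, and the flags are OR-ed together as the required features are discovered. A minimal sketch of the pattern, with made-up flag values rather than OpenMD's actual `DataStorage` constants:

```cpp
// Hypothetical flag values for illustration only.
enum StorageFlags {
  dslPosition   = 1 << 0,
  dslDipole     = 1 << 5,
  dslQuadrupole = 1 << 6
};

int storageLayout = 0;
bool hasDipoles = true, hasQuadrupoles = false;

if (hasDipoles)     storageLayout |= dslDipole;      // turn a bit on
if (hasQuadrupoles) storageLayout |= dslQuadrupole;

bool needsDipoleStorage = (storageLayout & dslDipole) != 0;  // test a bit
```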
| 803 |   | |
| 815 |   | int beginRigidBodyIndex; |
| 816 |   | int beginCutoffGroupIndex; |
| 817 |   | int nGlobalAtoms = info->getNGlobalAtoms(); |
| 818 | + | int nGlobalRigidBodies = info->getNGlobalRigidBodies(); |
| 819 |   | |
| 820 |   | beginAtomIndex = 0; |
| 821 | < | beginRigidBodyIndex = 0; |
| 821 | > | //rigidbody's index begins right after atom's |
| 822 | > | beginRigidBodyIndex = info->getNGlobalAtoms(); |
| 823 |   | beginCutoffGroupIndex = 0; |
| 824 |   | |
| 825 |   | for(int i = 0; i < info->getNGlobalMolecules(); i++) { |
| 882 |   | // This would be prettier if we could use MPI_IN_PLACE like the MPI-2 |
| 883 |   | // docs said we could. |
| 884 |   | std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0); |
| 885 | < | MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms, |
| 886 | < | MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 885 | > | MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], |
| 886 | > | &tmpGroupMembership[0], nGlobalAtoms, |
| 887 | > | MPI::INT, MPI::SUM); |
| 888 |   | info->setGlobalGroupMembership(tmpGroupMembership); |
| 889 |   | #else |
| 890 |   | info->setGlobalGroupMembership(globalGroupMembership); |
| 891 |   | #endif |
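The Allreduce above (and the similar ones further down) uses a common stand-in for `MPI_IN_PLACE`: every rank fills in only the vector entries it owns, leaves the rest at zero, and an element-wise `SUM` reduction hands every rank the completed table. A self-contained sketch of just that pattern, not OpenMD code, assuming an initialized MPI environment:

```cpp
#include <mpi.h>
#include <vector>

// 'partial' holds real values only in the slots this rank owns; all other
// slots are zero.  After the call, every rank holds the merged vector.
void mergeOwnedEntries(std::vector<int>& partial) {
  std::vector<int> merged(partial.size(), 0);
  MPI::COMM_WORLD.Allreduce(&partial[0], &merged[0],
                            static_cast<int>(partial.size()),
                            MPI::INT, MPI::SUM);
  partial.swap(merged);
}
```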
| 892 |   | |
| 893 |   | //fill molMembership |
| 894 | < | std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0); |
| 894 | > | std::vector<int> globalMolMembership(info->getNGlobalAtoms() + |
| 895 | > | info->getNGlobalRigidBodies(), 0); |
| 896 |   | |
| 897 | < | for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) { |
| 897 | > | for(mol = info->beginMolecule(mi); mol != NULL; |
| 898 | > | mol = info->nextMolecule(mi)) { |
| 899 |   | for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) { |
| 900 |   | globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex(); |
| 901 |   | } |
| 902 | + | for (rb = mol->beginRigidBody(ri); rb != NULL; |
| 903 | + | rb = mol->nextRigidBody(ri)) { |
| 904 | + | globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex(); |
| 905 | + | } |
| 906 |   | } |
| 907 |   | |
| 908 |   | #ifdef IS_MPI |
| 909 | < | std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0); |
| 909 | > | std::vector<int> tmpMolMembership(info->getNGlobalAtoms() + |
| 910 | > | info->getNGlobalRigidBodies(), 0); |
| 911 | > | MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
| 912 | > | nGlobalAtoms + nGlobalRigidBodies, |
| 913 | > | MPI::INT, MPI::SUM); |
| 914 |   | |
| 864 | - | MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms, |
| 865 | - | MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 866 | - | |
| 915 |   | info->setGlobalMolMembership(tmpMolMembership); |
| 916 |   | #else |
| 917 |   | info->setGlobalMolMembership(globalMolMembership); |
| 921 |   | // here the molecules are listed by their global indices. |
| 922 |   | |
| 923 |   | std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0); |
| 924 | < | for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) { |
| 924 | > | for (mol = info->beginMolecule(mi); mol != NULL; |
| 925 | > | mol = info->nextMolecule(mi)) { |
| 926 |   | nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects(); |
| 927 |   | } |
| 928 |   | |
| 929 |   | #ifdef IS_MPI |
| 930 |   | std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); |
| 931 | < | MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 932 | < | info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 931 | > | MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 932 | > | info->getNGlobalMolecules(), MPI::INT, MPI::SUM); |
| 933 |   | #else |
| 934 |   | std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; |
| 935 |   | #endif |
| 943 |   | } |
| 944 |   | |
| 945 |   | std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL); |
| 946 | < | for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) { |
| 946 | > | for (mol = info->beginMolecule(mi); mol != NULL; |
| 947 | > | mol = info->nextMolecule(mi)) { |
| 948 |   | int myGlobalIndex = mol->getGlobalIndex(); |
| 949 |   | int globalIO = startingIOIndexForMol[myGlobalIndex]; |
| 950 | < | for (StuntDouble* integrableObject = mol->beginIntegrableObject(ioi); integrableObject != NULL; |
| 951 | < | integrableObject = mol->nextIntegrableObject(ioi)) { |
| 952 | < | integrableObject->setGlobalIntegrableObjectIndex(globalIO); |
| 953 | < | IOIndexToIntegrableObject[globalIO] = integrableObject; |
| 950 | > | for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL; |
| 951 | > | sd = mol->nextIntegrableObject(ioi)) { |
| 952 | > | sd->setGlobalIntegrableObjectIndex(globalIO); |
| 953 | > | IOIndexToIntegrableObject[globalIO] = sd; |
| 954 |   | globalIO++; |
| 955 |   | } |
| 956 |   | } |
| 960 |   | } |
| 961 |   | |
| 962 |   | void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) { |
| 913 | - | Globals* simParams; |
| 914 | - | |
| 915 | - | simParams = info->getSimParams(); |
| 963 |   | |
| 964 |   | DumpReader reader(info, mdFileName); |
| 965 |   | int nframes = reader.getNFrames(); |
| 966 | < | |
| 966 | > | |
| 967 |   | if (nframes > 0) { |
| 968 |   | reader.readFrame(nframes - 1); |
| 969 |   | } else { |