 36    * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 37    * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 38    * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 39 <  * [4] Vardeman & Gezelter, in progress (2009).
 39 >  * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
 40 >  * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
 41    */
 42
 43    /**
 44    * @file SimCreator.cpp
 45    * @author tlin
 46    * @date 11/03/2004
 46 -  * @time 13:51am
 47    * @version 1.0
 48    */
 49    #include <exception>
 55    #include "brains/SimCreator.hpp"
 56    #include "brains/SimSnapshotManager.hpp"
 57    #include "io/DumpReader.hpp"
 58 <  #include "UseTheForce/ForceFieldFactory.hpp"
 58 >  #include "brains/ForceField.hpp"
 59    #include "utils/simError.h"
 60    #include "utils/StringUtils.hpp"
 61    #include "math/SeqRandNumGen.hpp"
 75    #include "antlr/NoViableAltForCharException.hpp"
 76    #include "antlr/NoViableAltException.hpp"
 77
 78 +  #include "types/DirectionalAdapter.hpp"
 79 +  #include "types/MultipoleAdapter.hpp"
 80 +  #include "types/EAMAdapter.hpp"
 81 +  #include "types/SuttonChenAdapter.hpp"
 82 +  #include "types/PolarizableAdapter.hpp"
 83 +  #include "types/FixedChargeAdapter.hpp"
 84 +  #include "types/FluctuatingChargeAdapter.hpp"
 85 +
 86    #ifdef IS_MPI
 87 +  #include "mpi.h"
 88    #include "math/ParallelRandNumGen.hpp"
 89    #endif
 90
 91    namespace OpenMD {
 92
 93 <  Globals* SimCreator::parseFile(std::istream& rawMetaDataStream, const std::string& filename, int startOfMetaDataBlock ){
 93 >  Globals* SimCreator::parseFile(std::istream& rawMetaDataStream, const std::string& filename, int mdFileVersion, int startOfMetaDataBlock ){
 94    Globals* simParams = NULL;
 95    try {
 96
101    const int masterNode = 0;
102    int commStatus;
103    if (worldRank == masterNode) {
104 <  #endif
105 <
104 >  commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
105 >  #endif
106    SimplePreprocessor preprocessor;
107    preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
108
115
116
117    } else {
118 +
119 +  commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
120 +
121    //get stream size
122    commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
123
241    simError();
242    }
243
244 +  simParams->setMDfileVersion(mdFileVersion);
245    return simParams;
246    }
247
254    std::string mdRawData;
255    int metaDataBlockStart = -1;
256    int metaDataBlockEnd = -1;
257 <  int i;
258 <  int mdOffset;
257 >  int i, j;
258 >  streamoff mdOffset;
259 >  int mdFileVersion;
260
261 +  // Create a string for embedding the version information in the MetaData
262 +  std::string version;
263 +  version.assign("## Last run using OpenMD Version: ");
264 +  version.append(OPENMD_VERSION_MAJOR);
265 +  version.append(".");
266 +  version.append(OPENMD_VERSION_MINOR);
267 +
268 +  std::string svnrev;
269 +  //convert a macro from compiler to a string in c++
270 +  STR_DEFINE(svnrev, SVN_REV );
271 +  version.append(" Revision: ");
272 +  // If there's no SVN revision, just call this the RELEASE revision.
273 +  if (!svnrev.empty()) {
274 +  version.append(svnrev);
275 +  } else {
276 +  version.append("RELEASE");
277 +  }
278 +
279    #ifdef IS_MPI
280    const int masterNode = 0;
281    if (worldRank == masterNode) {
282    #endif
283
284 <  std::ifstream mdFile_(mdFileName.c_str());
284 >  std::ifstream mdFile_;
285 >  mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
286
287    if (mdFile_.fail()) {
288    sprintf(painCave.errMsg,
309    painCave.isFatal = 1;
310    simError();
311    }
312 +
313 +  // found the correct opening string, now try to get the file
314 +  // format version number.
315
316 +  StringTokenizer tokenizer(line, "=<> \t\n\r");
317 +  std::string fileType = tokenizer.nextToken();
318 +  toUpper(fileType);
319 +
320 +  mdFileVersion = 0;
321 +
322 +  if (fileType == "OPENMD") {
323 +  while (tokenizer.hasMoreTokens()) {
324 +  std::string token(tokenizer.nextToken());
325 +  toUpper(token);
326 +  if (token == "VERSION") {
327 +  mdFileVersion = tokenizer.nextTokenAsInt();
328 +  break;
329 +  }
330 +  }
331 +  }
332 +
333    //scan through the input stream and find MetaData tag
334    while(mdFile_.getline(buffer, bufferSize)) {
335    ++lineNo;
369    mdFile_.seekg(mdOffset);
370
371    mdRawData.clear();
372 +
373 +  bool foundVersion = false;
374
375    for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
376    mdFile_.getline(buffer, bufferSize);
377 <  mdRawData += buffer;
377 >  std::string line = trimLeftCopy(buffer);
378 >  j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
379 >  if (static_cast<size_t>(j) != string::npos) {
380 >  foundVersion = true;
381 >  mdRawData += version;
382 >  } else {
383 >  mdRawData += buffer;
384 >  }
385    mdRawData += "\n";
386    }
387 <
387 >
388 >  if (!foundVersion) mdRawData += version + "\n";
389 >
390    mdFile_.close();
391
392    #ifdef IS_MPI
396    std::stringstream rawMetaDataStream(mdRawData);
397
398    //parse meta-data file
399 <  Globals* simParams = parseFile(rawMetaDataStream, mdFileName, metaDataBlockStart+1);
399 >  Globals* simParams = parseFile(rawMetaDataStream, mdFileName, mdFileVersion,
400 >  metaDataBlockStart + 1);
401
402    //create the force field
403 <  ForceField * ff = ForceFieldFactory::getInstance()->createForceField(simParams->getForceField());
403 >  ForceField * ff = new ForceField(simParams->getForceField());
404
405    if (ff == NULL) {
406    sprintf(painCave.errMsg,
451    //create the molecules
452    createMolecules(info);
453
454 +  //find the storage layout
455 +
456 +  int storageLayout = computeStorageLayout(info);
457 +
458    //allocate memory for DataStorage(circular reference, need to
459    //break it)
460 <  info->setSnapshotManager(new SimSnapshotManager(info));
460 >  info->setSnapshotManager(new SimSnapshotManager(info, storageLayout));
461
462    //set the global index of atoms, rigidbodies and cutoffgroups
463    //(only need to be set once, the global index will never change
534    int nGlobalMols = info->getNGlobalMolecules();
535    std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
536
537 <  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
537 >  nProcessors = MPI::COMM_WORLD.Get_size();
538
539    if (nProcessors > nGlobalMols) {
540    sprintf(painCave.errMsg,
572    nTarget = (int)(precast + 0.5);
573
574    for(i = 0; i < nGlobalMols; i++) {
575 +
576    done = 0;
577    loops = 0;
578
597    // and be done with it.
598
599    if (loops > 100) {
600 +
601    sprintf(painCave.errMsg,
602 <  "I've tried 100 times to assign molecule %d to a "
603 <  " processor, but can't find a good spot.\n"
604 <  "I'm assigning it at random to processor %d.\n",
602 >  "There have been 100 attempts to assign molecule %d to an\n"
603 >  "\tunderworked processor, but there's no good place to\n"
604 >  "\tleave it. OpenMD is assigning it at random to processor %d.\n",
605    i, which_proc);
606 <
606 >
607    painCave.isFatal = 0;
608 +  painCave.severity = OPENMD_INFO;
609    simError();
610
611    molToProcMap[i] = which_proc;
650    }
651
652    delete myRandom;
653 <
653 >
654    // Spray out this nonsense to all other processors:
655 <
584 <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
655 >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
656    } else {
657
658    // Listen to your marching orders from processor 0:
659 <
660 <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
659 >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
660 >
661    }
662
663    info->setMolToProcMap(molToProcMap);
695
696    } //end for(int i=0)
697    }
698 +
699 +  int SimCreator::computeStorageLayout(SimInfo* info) {
700 +
701 +  Globals* simParams = info->getSimParams();
702 +  int nRigidBodies = info->getNGlobalRigidBodies();
703 +  set<AtomType*> atomTypes = info->getSimulatedAtomTypes();
704 +  set<AtomType*>::iterator i;
705 +  bool hasDirectionalAtoms = false;
706 +  bool hasFixedCharge = false;
707 +  bool hasDipoles = false;
708 +  bool hasQuadrupoles = false;
709 +  bool hasPolarizable = false;
710 +  bool hasFluctuatingCharge = false;
711 +  bool hasMetallic = false;
712 +  int storageLayout = 0;
713 +  storageLayout |= DataStorage::dslPosition;
714 +  storageLayout |= DataStorage::dslVelocity;
715 +  storageLayout |= DataStorage::dslForce;
716 +
717 +  for (i = atomTypes.begin(); i != atomTypes.end(); ++i) {
718 +
719 +  DirectionalAdapter da = DirectionalAdapter( (*i) );
720 +  MultipoleAdapter ma = MultipoleAdapter( (*i) );
721 +  EAMAdapter ea = EAMAdapter( (*i) );
722 +  SuttonChenAdapter sca = SuttonChenAdapter( (*i) );
723 +  PolarizableAdapter pa = PolarizableAdapter( (*i) );
724 +  FixedChargeAdapter fca = FixedChargeAdapter( (*i) );
725 +  FluctuatingChargeAdapter fqa = FluctuatingChargeAdapter( (*i) );
726 +
727 +  if (da.isDirectional()){
728 +  hasDirectionalAtoms = true;
729 +  }
730 +  if (ma.isDipole()){
731 +  hasDipoles = true;
732 +  }
733 +  if (ma.isQuadrupole()){
734 +  hasQuadrupoles = true;
735 +  }
736 +  if (ea.isEAM() || sca.isSuttonChen()){
737 +  hasMetallic = true;
738 +  }
739 +  if ( fca.isFixedCharge() ){
740 +  hasFixedCharge = true;
741 +  }
742 +  if ( fqa.isFluctuatingCharge() ){
743 +  hasFluctuatingCharge = true;
744 +  }
745 +  if ( pa.isPolarizable() ){
746 +  hasPolarizable = true;
747 +  }
748 +  }
749
750 +  if (nRigidBodies > 0 || hasDirectionalAtoms) {
751 +  storageLayout |= DataStorage::dslAmat;
752 +  if(storageLayout & DataStorage::dslVelocity) {
753 +  storageLayout |= DataStorage::dslAngularMomentum;
754 +  }
755 +  if (storageLayout & DataStorage::dslForce) {
756 +  storageLayout |= DataStorage::dslTorque;
757 +  }
758 +  }
759 +  if (hasDipoles) {
760 +  storageLayout |= DataStorage::dslDipole;
761 +  }
762 +  if (hasQuadrupoles) {
763 +  storageLayout |= DataStorage::dslQuadrupole;
764 +  }
765 +  if (hasFixedCharge || hasFluctuatingCharge) {
766 +  storageLayout |= DataStorage::dslSkippedCharge;
767 +  }
768 +  if (hasMetallic) {
769 +  storageLayout |= DataStorage::dslDensity;
770 +  storageLayout |= DataStorage::dslFunctional;
771 +  storageLayout |= DataStorage::dslFunctionalDerivative;
772 +  }
773 +  if (hasPolarizable) {
774 +  storageLayout |= DataStorage::dslElectricField;
775 +  }
776 +  if (hasFluctuatingCharge){
777 +  storageLayout |= DataStorage::dslFlucQPosition;
778 +  if(storageLayout & DataStorage::dslVelocity) {
779 +  storageLayout |= DataStorage::dslFlucQVelocity;
780 +  }
781 +  if (storageLayout & DataStorage::dslForce) {
782 +  storageLayout |= DataStorage::dslFlucQForce;
783 +  }
784 +  }
785 +
786 +  // if the user has asked for them, make sure we've got the memory for the
787 +  // objects defined.
788 +
789 +  if (simParams->getOutputParticlePotential()) {
790 +  storageLayout |= DataStorage::dslParticlePot;
791 +  }
792 +
793 +  if (simParams->havePrintHeatFlux()) {
794 +  if (simParams->getPrintHeatFlux()) {
795 +  storageLayout |= DataStorage::dslParticlePot;
796 +  }
797 +  }
798 +
799 +  if (simParams->getOutputElectricField()) {
800 +  storageLayout |= DataStorage::dslElectricField;
801 +  }
802 +
803 +  if (simParams->getOutputFluctuatingCharges()) {
804 +  storageLayout |= DataStorage::dslFlucQPosition;
805 +  storageLayout |= DataStorage::dslFlucQVelocity;
806 +  storageLayout |= DataStorage::dslFlucQForce;
807 +  }
808 +
809 +  return storageLayout;
810 +  }
811 +
812    void SimCreator::setGlobalIndex(SimInfo *info) {
813    SimInfo::MoleculeIterator mi;
814    Molecule::AtomIterator ai;
823    int beginRigidBodyIndex;
824    int beginCutoffGroupIndex;
825    int nGlobalAtoms = info->getNGlobalAtoms();
826 +  int nGlobalRigidBodies = info->getNGlobalRigidBodies();
827
828    beginAtomIndex = 0;
829 <  beginRigidBodyIndex = 0;
829 >  //rigidbody's index begins right after atom's
830 >  beginRigidBodyIndex = info->getNGlobalAtoms();
831    beginCutoffGroupIndex = 0;
832
833    for(int i = 0; i < info->getNGlobalMolecules(); i++) {
890    // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
891    // docs said we could.
892    std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
893 <  MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
894 <  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
893 >  MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
894 >  &tmpGroupMembership[0], nGlobalAtoms,
895 >  MPI::INT, MPI::SUM);
896    info->setGlobalGroupMembership(tmpGroupMembership);
897    #else
898    info->setGlobalGroupMembership(globalGroupMembership);
899    #endif
900
901    //fill molMembership
902 <  std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
902 >  std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
903 >  info->getNGlobalRigidBodies(), 0);
904
905 <  for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
905 >  for(mol = info->beginMolecule(mi); mol != NULL;
906 >  mol = info->nextMolecule(mi)) {
907    for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
908    globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
909    }
910 +  for (rb = mol->beginRigidBody(ri); rb != NULL;
911 +  rb = mol->nextRigidBody(ri)) {
912 +  globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
913 +  }
914    }
915
916    #ifdef IS_MPI
917 <  std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
917 >  std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
918 >  info->getNGlobalRigidBodies(), 0);
919 >  MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
920 >  nGlobalAtoms + nGlobalRigidBodies,
921 >  MPI::INT, MPI::SUM);
922
726 -  MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
727 -  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
728 -
923    info->setGlobalMolMembership(tmpMolMembership);
924    #else
925    info->setGlobalMolMembership(globalMolMembership);
929    // here the molecules are listed by their global indices.
930
931    std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
932 <  for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
932 >  for (mol = info->beginMolecule(mi); mol != NULL;
933 >  mol = info->nextMolecule(mi)) {
934    nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
935    }
936
937    #ifdef IS_MPI
938    std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
939 <  MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
940 <  info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
939 >  MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
940 >  info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
941    #else
942    std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
943    #endif
951    }
952
953    std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
954 <  for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
954 >  for (mol = info->beginMolecule(mi); mol != NULL;
955 >  mol = info->nextMolecule(mi)) {
956    int myGlobalIndex = mol->getGlobalIndex();
957    int globalIO = startingIOIndexForMol[myGlobalIndex];
958 <  for (StuntDouble* integrableObject = mol->beginIntegrableObject(ioi); integrableObject != NULL;
959 <  integrableObject = mol->nextIntegrableObject(ioi)) {
960 <  integrableObject->setGlobalIntegrableObjectIndex(globalIO);
961 <  IOIndexToIntegrableObject[globalIO] = integrableObject;
958 >  for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
959 >  sd = mol->nextIntegrableObject(ioi)) {
960 >  sd->setGlobalIntegrableObjectIndex(globalIO);
961 >  IOIndexToIntegrableObject[globalIO] = sd;
962    globalIO++;
963    }
964    }