  35      *
  36      * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  37      * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  38 <    * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  38 >    * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
  39      * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010).
  40      * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
  41      */
  44      * @file SimCreator.cpp
  45      * @author tlin
  46      * @date 11/03/2004
  47 -    * @time 13:51am
  47      * @version 1.0
  48      */
  49     #include <exception>
  99     #ifdef IS_MPI
 100           int streamSize;
 101           const int masterNode = 0;
 102 <         int commStatus;
 102 >
 103           if (worldRank == masterNode) {
 104 <           commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 104 >           MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 105     #endif
 106             SimplePreprocessor preprocessor;
 107             preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
 109     #ifdef IS_MPI
 110             //brocasting the stream size
 111             streamSize = ppStream.str().size() +1;
 112 <           commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 113 <
 114 <           commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 116 <
 117 <
 112 >           MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
 113 >           MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
 114 >
 115           } else {
 116 +           MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
 117
 120 -           commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 121 -
 118             //get stream size
 119 <           commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
 119 >           MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
 120
 121             char* buf = new char[streamSize];
 122             assert(buf);
 123
 124             //receive file content
 125 <           commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
 125 >           MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
 126
 127             ppStream.str(buf);
 128             delete [] buf;
 133 -
 129           }
 130     #endif
 131           // Create a scanner that reads from the input stream
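This hunk moves the metadata broadcast from the C MPI calls to the MPI-2 C++ bindings: the master rank preprocesses the file, broadcasts the stream size, then broadcasts the characters, and the other ranks size a buffer from the first message before receiving the second. A standalone sketch of that size-then-characters pattern (not OpenMD code, and it assumes an MPI installation that still provides the C++ bindings, which later MPI standards removed):

    // Sketch: broadcasting a std::string with the MPI C++ bindings,
    // following the same two-step pattern as the hunk above.
    #include <mpi.h>
    #include <string>
    #include <vector>
    #include <iostream>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      const int masterNode = 0;
      int rank = MPI::COMM_WORLD.Get_rank();

      std::string payload;
      if (rank == masterNode) payload = "molecule { name = \"Ar\"; }";

      // Step 1: broadcast the size (including the trailing '\0') so every
      // rank can allocate a receive buffer of the right length.
      int streamSize = static_cast<int>(payload.size()) + 1;
      MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::INT, masterNode);

      // Step 2: broadcast the characters themselves.
      std::vector<char> buf(streamSize, '\0');
      if (rank == masterNode) payload.copy(&buf[0], payload.size());
      MPI::COMM_WORLD.Bcast(&buf[0], streamSize, MPI::CHAR, masterNode);
      if (rank != masterNode) payload.assign(&buf[0]);

      std::cout << "rank " << rank << " holds " << payload.size() << " chars\n";
      MPI::Finalize();
      return 0;
    }

Broadcasting the length first is what lets the non-master branch above allocate char* buf = new char[streamSize] before the payload arrives.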
 250       std::string mdRawData;
 251       int metaDataBlockStart = -1;
 252       int metaDataBlockEnd = -1;
 253 <     int i;
 253 >     int i, j;
 254       streamoff mdOffset;
 255       int mdFileVersion;
 256
 257 +     // Create a string for embedding the version information in the MetaData
 258 +     std::string version;
 259 +     version.assign("## Last run using OpenMD Version: ");
 260 +     version.append(OPENMD_VERSION_MAJOR);
 261 +     version.append(".");
 262 +     version.append(OPENMD_VERSION_MINOR);
 263 +
 264 +     std::string svnrev;
 265 +     //convert a macro from compiler to a string in c++
 266 +     STR_DEFINE(svnrev, SVN_REV );
 267 +     version.append(" Revision: ");
 268 +     // If there's no SVN revision, just call this the RELEASE revision.
 269 +     if (!svnrev.empty()) {
 270 +       version.append(svnrev);
 271 +     } else {
 272 +       version.append("RELEASE");
 273 +     }
 274 +
 275     #ifdef IS_MPI
 276       const int masterNode = 0;
 277       if (worldRank == masterNode) {
 278     #endif
 279
 280 <       std::ifstream mdFile_(mdFileName.c_str());
 280 >       std::ifstream mdFile_;
 281 >       mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
 282
 283         if (mdFile_.fail()) {
 284           sprintf(painCave.errMsg,
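The added block builds a version stamp at run time and uses STR_DEFINE to turn the compiler-supplied SVN_REV macro into a std::string; the metadata file is also switched to an explicit binary-mode open. STR_DEFINE itself is not shown in this diff, so the sketch below is only the usual two-level stringification idiom such a macro typically wraps, not OpenMD's actual definition:

    // Sketch (assumption): converting a compile-time macro such as SVN_REV
    // into a std::string via two-level stringification.
    #include <string>
    #include <iostream>

    #define STRINGIFY_(x) #x
    #define STRINGIFY(x)  STRINGIFY_(x)        // extra level so the macro expands first
    #define STR_DEFINE(var, macro) (var) = STRINGIFY(macro)

    #ifndef SVN_REV
    #define SVN_REV 1891                       // e.g. supplied as -DSVN_REV=1891
    #endif

    int main() {
      std::string svnrev;
      STR_DEFINE(svnrev, SVN_REV);             // svnrev == "1891"
      std::cout << "Revision: " << svnrev << "\n";
      return 0;
    }

The empty() check in the hunk above then falls back to the string RELEASE when no revision was baked into the build.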
 366
 367         mdRawData.clear();
 368
 369 +       bool foundVersion = false;
 370 +
 371         for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
 372           mdFile_.getline(buffer, bufferSize);
 373 <         mdRawData += buffer;
 373 >         std::string line = trimLeftCopy(buffer);
 374 >         j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
 375 >         if (static_cast<size_t>(j) != string::npos) {
 376 >           foundVersion = true;
 377 >           mdRawData += version;
 378 >         } else {
 379 >           mdRawData += buffer;
 380 >         }
 381           mdRawData += "\n";
 382         }
 383 <
 383 >
 384 >       if (!foundVersion) mdRawData += version + "\n";
 385 >
 386         mdFile_.close();
 387
 388     #ifdef IS_MPI
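The rewritten read loop no longer copies the metadata block verbatim: a line that already carries the "## Last run using OpenMD Version" stamp is replaced by the freshly built version string, and a stamp is appended after the loop if none was found. trimLeftCopy and CaseInsensitiveFind are OpenMD string utilities; the sketch below reproduces the same scan-and-replace pattern with standard-library pieces only:

    // Sketch (not OpenMD code): refresh an existing version stamp in the
    // metadata text, or append one if no stamp is present.
    #include <cctype>
    #include <cstddef>
    #include <sstream>
    #include <string>
    #include <iostream>

    // Case-insensitive prefix test, standing in for CaseInsensitiveFind.
    static bool startsWithNoCase(const std::string& line, const std::string& key) {
      if (line.size() < key.size()) return false;
      for (std::size_t k = 0; k < key.size(); ++k)
        if (std::tolower(static_cast<unsigned char>(line[k])) !=
            std::tolower(static_cast<unsigned char>(key[k]))) return false;
      return true;
    }

    int main() {
      const std::string version = "## Last run using OpenMD Version: 2.1";
      std::istringstream mdFile("molecule { ... }\n"
                                "## last run using openmd version: 2.0\n");
      std::string mdRawData, buffer;
      bool foundVersion = false;

      while (std::getline(mdFile, buffer)) {
        if (startsWithNoCase(buffer, "## Last run using OpenMD Version")) {
          foundVersion = true;
          mdRawData += version;     // overwrite the stale stamp
        } else {
          mdRawData += buffer;      // keep the line as-is
        }
        mdRawData += "\n";
      }
      if (!foundVersion) mdRawData += version + "\n";

      std::cout << mdRawData;
      return 0;
    }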
 510
 511     #ifdef IS_MPI
 512       void SimCreator::divideMolecules(SimInfo *info) {
 488 -       RealType numerator;
 489 -       RealType denominator;
 490 -       RealType precast;
 491 -       RealType x;
 492 -       RealType y;
 513         RealType a;
 494 -       int old_atoms;
 495 -       int add_atoms;
 496 -       int new_atoms;
 497 -       int nTarget;
 498 -       int done;
 499 -       int i;
 500 -       int j;
 501 -       int loops;
 502 -       int which_proc;
 514         int nProcessors;
 515         std::vector<int> atomsPerProc;
 516         int nGlobalMols = info->getNGlobalMolecules();
 517 <       std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
 517 >       std::vector<int> molToProcMap(nGlobalMols, -1); // default to an
 518 >                                                       // error
 519 >                                                       // condition:
 520
 521 <       MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
 521 >       nProcessors = MPI::COMM_WORLD.Get_size();
 522
 523         if (nProcessors > nGlobalMols) {
 524           sprintf(painCave.errMsg,
 527                   "\tthe number of molecules.  This will not result in a \n"
 528                   "\tusable division of atoms for force decomposition.\n"
 529                   "\tEither try a smaller number of processors, or run the\n"
 530 <                 "\tsingle-processor version of OpenMD.\n", nProcessors, nGlobalMols);
 530 >                 "\tsingle-processor version of OpenMD.\n", nProcessors,
 531 >                 nGlobalMols);
 532
 533           painCave.isFatal = 1;
 534           simError();
 535         }
 536
 523 -       int seedValue;
 537         Globals * simParams = info->getSimParams();
 538 <       SeqRandNumGen* myRandom; //divide labor does not need Parallel random number generator
 538 >       SeqRandNumGen* myRandom; //divide labor does not need Parallel
 539 >                                //random number generator
 540         if (simParams->haveSeed()) {
 541 <         seedValue = simParams->getSeed();
 541 >         int seedValue = simParams->getSeed();
 542           myRandom = new SeqRandNumGen(seedValue);
 543         }else {
 544           myRandom = new SeqRandNumGen();
 551         atomsPerProc.insert(atomsPerProc.end(), nProcessors, 0);
 552
 553         if (worldRank == 0) {
 554 <         numerator = info->getNGlobalAtoms();
 555 <         denominator = nProcessors;
 556 <         precast = numerator / denominator;
 557 <         nTarget = (int)(precast + 0.5);
 554 >         RealType numerator = info->getNGlobalAtoms();
 555 >         RealType denominator = nProcessors;
 556 >         RealType precast = numerator / denominator;
 557 >         int nTarget = (int)(precast + 0.5);
 558
 559 <         for(i = 0; i < nGlobalMols; i++) {
 560 <           done = 0;
 561 <           loops = 0;
 559 >         for(int i = 0; i < nGlobalMols; i++) {
 560 >
 561 >           int done = 0;
 562 >           int loops = 0;
 563
 564             while (!done) {
 565               loops++;
 566
 567               // Pick a processor at random
 568
 569 <             which_proc = (int) (myRandom->rand() * nProcessors);
 569 >             int which_proc = (int) (myRandom->rand() * nProcessors);
 570
 571               //get the molecule stamp first
 572               int stampId = info->getMoleculeStampId(i);
 573               MoleculeStamp * moleculeStamp = info->getMoleculeStamp(stampId);
 574
 575               // How many atoms does this processor have so far?
 576 <             old_atoms = atomsPerProc[which_proc];
 577 <             add_atoms = moleculeStamp->getNAtoms();
 578 <             new_atoms = old_atoms + add_atoms;
 576 >             int old_atoms = atomsPerProc[which_proc];
 577 >             int add_atoms = moleculeStamp->getNAtoms();
 578 >             int new_atoms = old_atoms + add_atoms;
 579
 580               // If we've been through this loop too many times, we need
 581               // to just give up and assign the molecule to this processor
 582               // and be done with it.
 583
 584               if (loops > 100) {
 585 +
 586                 sprintf(painCave.errMsg,
 587 <                       "I've tried 100 times to assign molecule %d to a "
 588 <                       " processor, but can't find a good spot.\n"
 589 <                       "I'm assigning it at random to processor %d.\n",
 587 >                       "There have been 100 attempts to assign molecule %d to an\n"
 588 >                       "\tunderworked processor, but there's no good place to\n"
 589 >                       "\tleave it.  OpenMD is assigning it at random to processor %d.\n",
 590                         i, which_proc);
 591 <
 591 >
 592                 painCave.isFatal = 0;
 593 +               painCave.severity = OPENMD_INFO;
 594                 simError();
 595
 596                 molToProcMap[i] = which_proc;
 619               //           Pacc(x) = exp(- a * x)
 620               // where a = penalty / (average atoms per molecule)
 621
 622 <             x = (RealType)(new_atoms - nTarget);
 623 <             y = myRandom->rand();
 622 >             RealType x = (RealType)(new_atoms - nTarget);
 623 >             RealType y = myRandom->rand();
 624
 625               if (y < exp(- a * x)) {
 626                 molToProcMap[i] = which_proc;
 635           }
 636
 637           delete myRandom;
 638 <
 638 >
 639           // Spray out this nonsense to all other processors:
 640 <
 624 <         MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 640 >         MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 641         } else {
 642
 643           // Listen to your marching orders from processor 0:
 644 <
 645 <         MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
 644 >         MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
 645 >
 646         }
 647
 648         info->setMolToProcMap(molToProcMap);
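divideMolecules offers each molecule to a randomly chosen processor and accepts the assignment with probability Pacc(x) = exp(-a * x), where x is the number of atoms by which that processor would overshoot its per-processor target and a = penalty / (average atoms per molecule); after 100 rejected attempts the molecule is placed anyway. A serial sketch of that acceptance loop (the molecule sizes, the penalty value of 3.0, and plain std::rand() are placeholders; the real routine uses SeqRandNumGen and broadcasts the finished map to all ranks):

    // Sketch: randomized load balancing with an exponential acceptance
    // penalty for processors that are already over their atom target.
    #include <cmath>
    #include <cstdlib>
    #include <cstddef>
    #include <numeric>
    #include <vector>
    #include <iostream>

    int main() {
      const int nProcessors = 4;
      const std::vector<int> atomsPerMol = {3, 5, 1, 12, 4, 4, 7, 2, 9, 3};
      const int nGlobalAtoms =
        std::accumulate(atomsPerMol.begin(), atomsPerMol.end(), 0);

      const int nTarget = static_cast<int>(double(nGlobalAtoms) / nProcessors + 0.5);
      const double avgAtomsPerMol = double(nGlobalAtoms) / atomsPerMol.size();
      const double a = 3.0 / avgAtomsPerMol;   // penalty of 3.0 is an assumed value

      std::vector<int> atomsPerProc(nProcessors, 0);
      std::vector<int> molToProcMap(atomsPerMol.size(), -1);

      for (std::size_t i = 0; i < atomsPerMol.size(); ++i) {
        for (int loops = 0; molToProcMap[i] < 0; ++loops) {
          int which_proc = std::rand() % nProcessors;        // pick a processor at random
          int new_atoms  = atomsPerProc[which_proc] + atomsPerMol[i];

          double x = double(new_atoms - nTarget);            // overshoot past the target
          double y = double(std::rand()) / RAND_MAX;

          // Under-target processors always accept (exp(-a*x) > 1);
          // overloaded ones accept with exponentially decaying probability.
          if (loops > 100 || y < std::exp(-a * x)) {
            molToProcMap[i]          = which_proc;
            atomsPerProc[which_proc] = new_atoms;
          }
        }
      }

      for (std::size_t i = 0; i < molToProcMap.size(); ++i)
        std::cout << "molecule " << i << " -> processor " << molToProcMap[i] << "\n";
      return 0;
    }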
 689       set<AtomType*>::iterator i;
 690       bool hasDirectionalAtoms = false;
 691       bool hasFixedCharge = false;
 692 <     bool hasMultipoles = false;
 692 >     bool hasDipoles = false;
 693 >     bool hasQuadrupoles = false;
 694       bool hasPolarizable = false;
 695       bool hasFluctuatingCharge = false;
 696       bool hasMetallic = false;
 712         if (da.isDirectional()){
 713           hasDirectionalAtoms = true;
 714         }
 715 <       if (ma.isMultipole()){
 716 <         hasMultipoles = true;
 715 >       if (ma.isDipole()){
 716 >         hasDipoles = true;
 717         }
 718 +       if (ma.isQuadrupole()){
 719 +         hasQuadrupoles = true;
 720 +       }
 721         if (ea.isEAM() || sca.isSuttonChen()){
 722           hasMetallic = true;
 723         }
 741           storageLayout |= DataStorage::dslTorque;
 742         }
 743       }
 744 <     if (hasMultipoles) {
 745 <       storageLayout |= DataStorage::dslElectroFrame;
 744 >     if (hasDipoles) {
 745 >       storageLayout |= DataStorage::dslDipole;
 746 >     }
 747 >     if (hasQuadrupoles) {
 748 >       storageLayout |= DataStorage::dslQuadrupole;
 749       }
 750       if (hasFixedCharge || hasFluctuatingCharge) {
 751         storageLayout |= DataStorage::dslSkippedCharge;
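storageLayout is a bitmask: each capability detected in the force field ORs one DataStorage::dsl* bit into the layout, which is why splitting the old multipole handling only means adding a separate quadrupole bit next to the dipole one. A minimal sketch of that composition with made-up bit values (the real flags are defined by DataStorage):

    // Sketch (illustrative bit values): composing and testing a storage-layout mask.
    #include <iostream>

    enum DataStorageLayout {
      dslPosition      = 1 << 0,
      dslDipole        = 1 << 1,
      dslQuadrupole    = 1 << 2,
      dslElectricField = 1 << 3
    };

    int main() {
      int storageLayout = dslPosition;          // positions are always stored

      bool hasDipoles = true, hasQuadrupoles = false;
      if (hasDipoles)     storageLayout |= dslDipole;
      if (hasQuadrupoles) storageLayout |= dslQuadrupole;

      // Consumers later test individual bits:
      if (storageLayout & dslDipole)
        std::cout << "dipole storage enabled\n";
      return 0;
    }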
 781         }
 782       }
 783
 784 <     if (simParams->getOutputElectricField()) {
 784 >     if (simParams->getOutputElectricField() | simParams->haveElectricField()) {
 785         storageLayout |= DataStorage::dslElectricField;
 786       }
 787 +
 788       if (simParams->getOutputFluctuatingCharges()) {
 789         storageLayout |= DataStorage::dslFlucQPosition;
 790         storageLayout |= DataStorage::dslFlucQVelocity;
 791         storageLayout |= DataStorage::dslFlucQForce;
 792       }
 793
 794 +     info->setStorageLayout(storageLayout);
 795 +
 796       return storageLayout;
 797     }
 798
 810       int beginRigidBodyIndex;
 811       int beginCutoffGroupIndex;
 812       int nGlobalAtoms = info->getNGlobalAtoms();
 813 +     int nGlobalRigidBodies = info->getNGlobalRigidBodies();
 814
 815       beginAtomIndex = 0;
 816       //rigidbody's index begins right after atom's
 877       // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
 878       // docs said we could.
 879       std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
 880 <     MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
 881 <                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 880 >     MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
 881 >                               &tmpGroupMembership[0], nGlobalAtoms,
 882 >                               MPI::INT, MPI::SUM);
 883       info->setGlobalGroupMembership(tmpGroupMembership);
 884     #else
 885       info->setGlobalGroupMembership(globalGroupMembership);
 886     #endif
 887
 888       //fill molMembership
 889 <     std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
 889 >     std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
 890 >                                          info->getNGlobalRigidBodies(), 0);
 891
 892 <     for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 892 >     for(mol = info->beginMolecule(mi); mol != NULL;
 893 >         mol = info->nextMolecule(mi)) {
 894         for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
 895           globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
 896         }
 897 +       for (rb = mol->beginRigidBody(ri); rb != NULL;
 898 +            rb = mol->nextRigidBody(ri)) {
 899 +         globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
 900 +       }
 901       }
 902
 903     #ifdef IS_MPI
 904 <     std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
 904 >     std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
 905 >                                       info->getNGlobalRigidBodies(), 0);
 906 >     MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
 907 >                               nGlobalAtoms + nGlobalRigidBodies,
 908 >                               MPI::INT, MPI::SUM);
 909
 872 -     MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
 873 -                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 874 -
 910       info->setGlobalMolMembership(tmpMolMembership);
 911     #else
 912       info->setGlobalMolMembership(globalMolMembership);
 916       // here the molecules are listed by their global indices.
 917
 918       std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
 919 <     for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 919 >     for (mol = info->beginMolecule(mi); mol != NULL;
 920 >          mol = info->nextMolecule(mi)) {
 921         nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
 922       }
 923
 924     #ifdef IS_MPI
 925       std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
 926 <     MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 927 <                   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
 926 >     MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
 927 >                               info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
 928     #else
 929       std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
 930     #endif
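These membership hunks grow the arrays so rigid bodies are indexed alongside atoms, and they merge the per-rank data with the fill-locally / Allreduce-SUM pattern: each rank writes only the entries for objects it owns, leaves the rest at zero, and an element-wise sum across ranks reassembles the complete table everywhere. A standalone sketch of that pattern (fake ownership and molecule indices, not OpenMD code):

    // Sketch: merging per-rank membership arrays with an element-wise SUM.
    #include <mpi.h>
    #include <vector>
    #include <iostream>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      int rank  = MPI::COMM_WORLD.Get_rank();
      int nproc = MPI::COMM_WORLD.Get_size();

      const int nGlobal = 8;                          // e.g. atoms + rigid bodies
      std::vector<int> localMembership(nGlobal, 0);   // unowned entries stay 0

      // Pretend ownership is round-robin; each owner records a (1-based)
      // molecule index so that zero unambiguously means "not mine".
      for (int idx = rank; idx < nGlobal; idx += nproc)
        localMembership[idx] = idx / 2 + 1;

      std::vector<int> globalMembership(nGlobal, 0);
      MPI::COMM_WORLD.Allreduce(&localMembership[0], &globalMembership[0],
                                nGlobal, MPI::INT, MPI::SUM);

      if (rank == 0)
        for (int idx = 0; idx < nGlobal; ++idx)
          std::cout << "object " << idx << " -> molecule "
                    << globalMembership[idx] << "\n";
      MPI::Finalize();
      return 0;
    }

The comment in the hunk about MPI_IN_PLACE points at the alternative: with an in-place reduction the temporary receive vector would not be needed.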
 938       }
 939
 940       std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
 941 <     for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
 941 >     for (mol = info->beginMolecule(mi); mol != NULL;
 942 >          mol = info->nextMolecule(mi)) {
 943         int myGlobalIndex = mol->getGlobalIndex();
 944         int globalIO = startingIOIndexForMol[myGlobalIndex];
 945         for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
 955     }
 956
 957     void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
 921 -     Globals* simParams;
 922 -
 923 -     simParams = info->getSimParams();
 958
 959       DumpReader reader(info, mdFileName);
 960       int nframes = reader.getNFrames();
 961 <
 961 >
 962       if (nframes > 0) {
 963         reader.readFrame(nframes - 1);
 964       } else {