| 513 |  | * data structures. | 
| 514 |  | */ | 
| 515 |  | void ForceMatrixDecomposition::collectIntermediateData() { | 
| 516 | + | #ifdef IS_MPI | 
| 517 | + |  | 
| 518 |  | snap_ = sman_->getCurrentSnapshot(); | 
| 519 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 520 | < | #ifdef IS_MPI | 
| 519 | < |  | 
| 520 | > |  | 
| 521 |  | if (storageLayout_ & DataStorage::dslDensity) { | 
| 522 |  |  | 
| 523 |  | AtomPlanRealRow->scatter(atomRowData.density, | 
| 552 |  | * row and column-indexed data structures | 
| 553 |  | */ | 
| 554 |  | void ForceMatrixDecomposition::distributeIntermediateData() { | 
| 555 | + | #ifdef IS_MPI | 
| 556 |  | snap_ = sman_->getCurrentSnapshot(); | 
| 557 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 558 | < | #ifdef IS_MPI | 
| 558 | > |  | 
| 559 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 560 |  | AtomPlanRealRow->gather(snap_->atomData.functional, | 
| 561 |  | atomRowData.functional); | 
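The two hunks above make the same change to collectIntermediateData() and distributeIntermediateData(): the #ifdef IS_MPI guard moves from just after the snapshot and storage-layout lookups to the very top of the routine, so a serial build compiles each body to a no-op instead of fetching state it never uses. A minimal, self-contained sketch of the resulting guard placement (the stand-in types here are hypothetical, not the OpenMD classes):

    #include <cstdio>

    // Illustrative stand-ins for the simulation state; not the real classes.
    struct Snapshot {};
    struct SnapshotManager {
        Snapshot  current;
        Snapshot* getCurrentSnapshot() { return &current; }
    };

    // With the guard at the top, a serial (non-IS_MPI) build compiles the
    // body away entirely: the snapshot is never even looked up.
    void collectIntermediateData(SnapshotManager& sman, Snapshot*& snap) {
    #ifdef IS_MPI
        snap = sman.getCurrentSnapshot();
        // ... scatter of the row/column-indexed intermediate data would go here
    #endif
    }

    int main() {
        SnapshotManager sman;
        Snapshot* snap = nullptr;
        collectIntermediateData(sman, snap);
        std::printf("snapshot fetched: %s\n", snap ? "yes" : "no");
        return 0;
    }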
| 574 |  |  | 
| 575 |  |  | 
| 576 |  | void ForceMatrixDecomposition::collectData() { | 
| 577 | + | #ifdef IS_MPI | 
| 578 |  | snap_ = sman_->getCurrentSnapshot(); | 
| 579 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 580 | < | #ifdef IS_MPI | 
| 580 | > |  | 
| 581 |  | int n = snap_->atomData.force.size(); | 
| 582 |  | vector<Vector3d> frc_tmp(n, V3Zero); | 
| 583 |  |  | 
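collectData() gets the same treatment, with the guard now placed before the allocation of frc_tmp, a zero-initialized scratch vector sized to the local force array. Presumably the scattered row- and column-indexed force contributions land in frc_tmp and are then accumulated into snap_->atomData.force; a self-contained sketch of that accumulation step (Vec3 standing in for Vector3d, the scatter itself omitted):

    #include <array>
    #include <cstdio>
    #include <vector>

    // Minimal stand-in for OpenMD's Vector3d, just for this sketch.
    using Vec3 = std::array<double, 3>;
    static const Vec3 V3Zero = {0.0, 0.0, 0.0};

    int main() {
        // Local force array (normally snap_->atomData.force).
        std::vector<Vec3> force(4, V3Zero);

        // Zero-initialized scratch vector, sized to the local atom count,
        // that receives the scattered row- or column-indexed contributions.
        std::vector<Vec3> frc_tmp(force.size(), V3Zero);

        // The scatter would fill frc_tmp here; fake one contribution instead.
        frc_tmp[2] = {1.0, 0.0, -1.0};

        // Accumulate the received contributions into the local forces.
        for (std::size_t i = 0; i < force.size(); ++i)
            for (int k = 0; k < 3; ++k)
                force[i][k] += frc_tmp[i][k];

        std::printf("force[2] = (%g, %g, %g)\n",
                    force[2][0], force[2][1], force[2][2]);
        return 0;
    }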
| 785 |  | * functional) loops onto local data structures. | 
| 786 |  | */ | 
| 787 |  | void ForceMatrixDecomposition::collectSelfData() { | 
| 788 | + |  | 
| 789 | + | #ifdef IS_MPI | 
| 790 |  | snap_ = sman_->getCurrentSnapshot(); | 
| 791 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 792 |  |  | 
| 788 | – | #ifdef IS_MPI | 
| 793 |  | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 794 |  | RealType ploc1 = embeddingPot[ii]; | 
| 795 |  | RealType ploc2 = 0.0; | 
| 806 |  |  | 
| 807 |  | } | 
| 808 |  |  | 
| 805 | – |  | 
| 806 | – |  | 
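In collectSelfData(), the per-family loop combines the embedding potential across processors: ploc1 holds this rank's contribution and ploc2 receives the incoming global value, which strongly suggests an all-reduce over MPI_COMM_WORLD. A hedged sketch of that reduction using the plain MPI C API (double stands in for RealType, the value of N_INTERACTION_FAMILIES here is illustrative, and the actual OpenMD call may use a different binding):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
        MPI_Init(&argc, &argv);

        const int N_INTERACTION_FAMILIES = 4;   // illustrative size only
        double embeddingPot[N_INTERACTION_FAMILIES] = {1.0, 0.5, 0.0, 0.25};

        for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
            double ploc1 = embeddingPot[ii];    // this rank's local contribution
            double ploc2 = 0.0;                 // receives the sum over all ranks
            MPI_Allreduce(&ploc1, &ploc2, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
            embeddingPot[ii] = ploc2;
        }

        int rank = 0;
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        if (rank == 0)
            std::printf("embeddingPot[0] summed over ranks: %g\n", embeddingPot[0]);

        MPI_Finalize();
        return 0;
    }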
| 809 |  | int& ForceMatrixDecomposition::getNAtomsInRow() { | 
| 810 |  | #ifdef IS_MPI | 
| 811 |  | return nAtomsInRow_; | 
| 833 |  | #endif | 
| 834 |  | } | 
| 835 |  |  | 
| 836 | < | Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){ | 
| 836 | > | Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, | 
| 837 | > | int cg2){ | 
| 838 | > |  | 
| 839 |  | Vector3d d; | 
| 836 | – |  | 
| 840 |  | #ifdef IS_MPI | 
| 841 |  | d = cgColData.position[cg2] - cgRowData.position[cg1]; | 
| 842 |  | #else | 
| 866 |  | } | 
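The getIntergroupVector() hunk (and the signature re-wraps that follow) expose the decomposition these accessors rely on: under IS_MPI an intergroup vector runs from a row-indexed copy of the cutoff-group positions (cgRowData) to a column-indexed copy (cgColData). A toy, self-contained illustration of that row-to-column lookup (illustrative names, with communication and periodic wrapping omitted):

    #include <array>
    #include <cstdio>
    #include <vector>

    using Vec3 = std::array<double, 3>;

    // Each processor holds a row-indexed copy and a column-indexed copy of
    // the group positions; a pair vector always runs row entry -> column entry.
    struct CGData { std::vector<Vec3> position; };

    Vec3 intergroupVector(const CGData& rowData, const CGData& colData,
                          int cg1, int cg2) {
        Vec3 d;
        for (int k = 0; k < 3; ++k)
            d[k] = colData.position[cg2][k] - rowData.position[cg1][k];
        return d;
    }

    int main() {
        CGData row{{{0.0, 0.0, 0.0}, {1.0, 0.0, 0.0}}};
        CGData col{{{0.0, 2.0, 0.0}, {0.0, 0.0, 3.0}}};
        Vec3 d = intergroupVector(row, col, 1, 0);
        std::printf("d = (%g, %g, %g)\n", d[0], d[1], d[2]);
        return 0;
    }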
| 867 |  |  | 
| 868 |  |  | 
| 869 | < | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){ | 
| 870 | < |  | 
| 869 | > | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, | 
| 870 | > | int cg1) { | 
| 871 |  | Vector3d d; | 
| 872 |  |  | 
| 873 |  | #ifdef IS_MPI | 
| 881 |  | return d; | 
| 882 |  | } | 
| 883 |  |  | 
| 884 | < | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){ | 
| 884 | > | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, | 
| 885 | > | int cg2) { | 
| 886 |  | Vector3d d; | 
| 887 |  |  | 
| 888 |  | #ifdef IS_MPI | 
| 913 |  |  | 
| 914 |  | } | 
| 915 |  |  | 
| 916 | < | Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){ | 
| 916 | > | Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, | 
| 917 | > | int atom2){ | 
| 918 |  | Vector3d d; | 
| 919 |  |  | 
| 920 |  | #ifdef IS_MPI | 
| 936 |  | * We need to exclude some overcounted interactions that result from | 
| 937 |  | * the parallel decomposition. | 
| 938 |  | */ | 
| 939 | < | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) { | 
| 939 | > | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, | 
| 940 | > | int cg1, int cg2) { | 
| 941 |  | int unique_id_1, unique_id_2; | 
| 942 |  |  | 
| 943 |  | #ifdef IS_MPI |
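skipAtomPair() is where the overcounting noted in the comment gets resolved: in a row/column decomposition the same (atom1, atom2) pair can be visited under both orderings, so exactly one of the two must be kept, decided from the two unique ids. One common tie-breaking scheme is sketched below as an illustration of the idea, not necessarily the rule used here; alternating the kept ordering with the parity of the id sum also balances work between the two orderings.

    #include <cstdio>

    bool skipPair(int unique_id_1, int unique_id_2) {
        if (unique_id_1 == unique_id_2) return true;   // a particle never pairs with itself
        if ((unique_id_1 + unique_id_2) % 2 == 0)
            return unique_id_1 < unique_id_2;          // even sum: keep the (larger, smaller) ordering
        return unique_id_1 > unique_id_2;              // odd sum: keep the (smaller, larger) ordering
    }

    int main() {
        // Exactly one ordering of each pair survives:
        std::printf("(3,7) skipped: %d   (7,3) skipped: %d\n",
                    (int)skipPair(3, 7), (int)skipPair(7, 3));
        std::printf("(2,5) skipped: %d   (5,2) skipped: %d\n",
                    (int)skipPair(2, 5), (int)skipPair(5, 2));
        return 0;
    }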