| 460 |  | } | 
| 461 |  | } | 
| 462 |  | } | 
| 463 | – |  | 
| 463 |  |  | 
| 464 |  | groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) { | 
| 465 |  | int i, j; | 
| 851 |  | pairwisePot[ii] = ploc2; | 
| 852 |  | } | 
| 853 |  |  | 
| 855 | – | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 856 | – | RealType ploc1 = embeddingPot[ii]; | 
| 857 | – | RealType ploc2 = 0.0; | 
| 858 | – | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 859 | – | embeddingPot[ii] = ploc2; | 
| 860 | – | } | 
| 861 | – |  | 
| 854 |  | // Here be dragons. | 
| 855 |  | MPI::Intracomm col = colComm.getComm(); | 
| 856 |  |  | 
| 863 |  |  | 
| 864 |  | } | 
| 865 |  |  | 
| 866 | + | /** | 
| 867 | + | * Collects information obtained during the post-pair (and embedding | 
| 868 | + | * functional) loops onto local data structures. | 
| 869 | + | */ | 
| 870 | + | void ForceMatrixDecomposition::collectSelfData() { | 
| 871 | + | snap_ = sman_->getCurrentSnapshot(); | 
| 872 | + | storageLayout_ = sman_->getStorageLayout(); | 
| 873 | + |  | 
| 874 | + | #ifdef IS_MPI | 
| 875 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 876 | + | RealType ploc1 = embeddingPot[ii]; | 
| 877 | + | RealType ploc2 = 0.0; | 
| 878 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 879 | + | embeddingPot[ii] = ploc2; | 
| 880 | + | } | 
| 881 | + | #endif | 
| 882 | + |  | 
| 883 | + | } | 
| 884 | + |  | 
| 885 | + |  | 
| 886 | + |  | 
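Note: the MPI C++ bindings used in collectSelfData (MPI::COMM_WORLD.Allreduce, MPI::REALTYPE, MPI::SUM) were deprecated in MPI-2.2 and removed from the standard in MPI-3.0. A minimal sketch of the same per-family reduction written against the C API, assuming RealType maps to double and embeddingPot is stored contiguously; the helper name reduceEmbeddingPot is made up for this sketch and is not part of the surrounding code:

    #include <mpi.h>
    #include <vector>

    // Sketch: sum each interaction family's embedding potential over all
    // ranks in a single collective, instead of one Allreduce per family.
    void reduceEmbeddingPot(std::vector<double>& embeddingPot) {
      // MPI_IN_PLACE lets each rank use its local buffer as both send and
      // receive buffer, so the ploc1/ploc2 temporaries are not needed.
      MPI_Allreduce(MPI_IN_PLACE, embeddingPot.data(),
                    static_cast<int>(embeddingPot.size()),
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    }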
| 887 |  | int ForceMatrixDecomposition::getNAtomsInRow() { | 
| 888 |  | #ifdef IS_MPI | 
| 889 |  | return nAtomsInRow_; | 
| 1006 |  | * We need to exclude some overcounted interactions that result from | 
| 1007 |  | * the parallel decomposition. | 
| 1008 |  | */ | 
| 1009 | – | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) { | 
| 1010 | – | int unique_id_1, unique_id_2; | 
| 1009 | + | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) { | 
| 1010 | + | int unique_id_1, unique_id_2, group1, group2; | 
| 1011 |  |  | 
| 1012 |  | #ifdef IS_MPI | 
| 1013 |  | // in MPI, we have to look up the unique IDs for each atom | 
| 1014 |  | unique_id_1 = AtomRowToGlobal[atom1]; | 
| 1015 |  | unique_id_2 = AtomColToGlobal[atom2]; | 
| 1016 | + | group1 = cgRowToGlobal[cg1]; | 
| 1017 | + | group2 = cgColToGlobal[cg2]; | 
| 1018 |  | #else | 
| 1019 |  | unique_id_1 = AtomLocalToGlobal[atom1]; | 
| 1020 |  | unique_id_2 = AtomLocalToGlobal[atom2]; | 
| 1021 | + | group1 = cgLocalToGlobal[cg1]; | 
| 1022 | + | group2 = cgLocalToGlobal[cg2]; | 
| 1023 |  | #endif | 
| 1024 |  |  | 
| 1025 |  | if (unique_id_1 == unique_id_2) return true; | 
| 1030 |  | if ((unique_id_1 + unique_id_2) % 2 == 0) return true; | 
| 1031 |  | } else { | 
| 1032 |  | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 1033 | + | } | 
| 1034 | + | #endif | 
| 1035 | + |  | 
| 1036 | + | #ifndef IS_MPI | 
| 1037 | + | if (group1 == group2) { | 
| 1038 | + | if (unique_id_1 < unique_id_2) return true; | 
| 1039 |  | } | 
| 1040 |  | #endif | 
| 1041 |  |  |
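Note: the even/odd test above assigns each distinct pair of atoms to exactly one of the two orderings that appear in the row/column decomposition, while the serial-mode group1 == group2 branch skips one ordering of an intra-group pair so it is not visited twice. A small self-contained check of the parity rule, using only the logic shown in skipAtomPair; the helper name acceptPair is made up for this sketch:

    #include <cassert>

    // Mirrors the parity rule in skipAtomPair: returns true when this
    // ordering of the pair should be computed (i.e. not skipped).
    static bool acceptPair(int id1, int id2) {
      if (id1 == id2) return false;                // self pairs always skipped
      if (id1 < id2) return (id1 + id2) % 2 == 1;  // skipped when sum is even
      return (id1 + id2) % 2 == 0;                 // skipped when sum is odd
    }

    int main() {
      // Every distinct pair is accepted in exactly one of its two orderings,
      // so no interaction is computed twice and none is dropped.
      for (int i = 0; i < 64; ++i)
        for (int j = 0; j < 64; ++j)
          if (i != j) assert(acceptPair(i, j) != acceptPair(j, i));
      return 0;
    }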