| 36 |  | * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). | 
| 37 |  | * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). | 
| 38 |  | * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008). | 
| 39 | < | * [4]  Vardeman & Gezelter, in progress (2009). | 
| 39 | > | * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010). | 
| 40 | > | * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). | 
| 41 |  | */ | 
| 42 |  | #include "parallel/ForceMatrixDecomposition.hpp" | 
| 43 |  | #include "math/SquareMatrix3.hpp" | 
| 95 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 96 |  | ff_ = info_->getForceField(); | 
| 97 |  | nLocal_ = snap_->getNumberOfAtoms(); | 
| 98 | < |  | 
| 98 | > |  | 
| 99 |  | nGroups_ = info_->getNLocalCutoffGroups(); | 
| 100 |  | // gather the information for atomtype IDs (atids): | 
| 101 |  | idents = info_->getIdentArray(); | 
| 109 |  | PairList* oneTwo = info_->getOneTwoInteractions(); | 
| 110 |  | PairList* oneThree = info_->getOneThreeInteractions(); | 
| 111 |  | PairList* oneFour = info_->getOneFourInteractions(); | 
| 112 | < |  | 
| 112 | > |  | 
| 113 | > | if (needVelocities_) | 
| 114 | > | snap_->cgData.setStorageLayout(DataStorage::dslPosition | | 
| 115 | > | DataStorage::dslVelocity); | 
| 116 | > | else | 
| 117 | > | snap_->cgData.setStorageLayout(DataStorage::dslPosition); | 
| 118 | > |  | 
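Reviewer note: the dsl* storage layouts used throughout this change are bit flags, so a layout is built with bitwise OR (as in the if/else above) and queried with bitwise AND (as in the many storageLayout_ & DataStorage::dsl... tests below). A minimal standalone sketch of the pattern; the flag values here are illustrative, not OpenMD's actual DataStorage constants:

    #include <cstdio>

    // Hypothetical flag values standing in for the DataStorage enum.
    enum StorageBits { dslPosition = 1, dslVelocity = 2, dslForce = 4 };

    int main() {
      int layout = dslPosition | dslVelocity;        // build a layout
      std::printf("velocity stored? %d\n",
                  (layout & dslVelocity) != 0);      // prints 1
      std::printf("force stored?    %d\n",
                  (layout & dslForce) != 0);         // prints 0
      return 0;
    }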
| 119 |  | #ifdef IS_MPI | 
| 120 |  |  | 
| 121 |  | MPI::Intracomm row = rowComm.getComm(); | 
| 151 |  | cgRowData.resize(nGroupsInRow_); | 
| 152 |  | cgRowData.setStorageLayout(DataStorage::dslPosition); | 
| 153 |  | cgColData.resize(nGroupsInCol_); | 
| 154 | < | cgColData.setStorageLayout(DataStorage::dslPosition); | 
| 155 | < |  | 
| 154 | > | if (needVelocities_) | 
| 155 | > | // only allocate column velocities when velocity data is required | 
| 156 | > | cgColData.setStorageLayout(DataStorage::dslPosition | | 
| 157 | > | DataStorage::dslVelocity); | 
| 158 | > | else | 
| 159 | > | cgColData.setStorageLayout(DataStorage::dslPosition); | 
| 160 | > |  | 
| 161 |  | identsRow.resize(nAtomsInRow_); | 
| 162 |  | identsCol.resize(nAtomsInCol_); | 
| 163 |  |  | 
| 241 |  | topoDist[i].push_back(3); | 
| 242 |  | } | 
| 243 |  | } | 
| 232 | – | } | 
| 233 | – | } | 
| 234 | – | } | 
| 235 | – |  | 
| 236 | – | #endif | 
| 237 | – |  | 
| 238 | – | // allocate memory for the parallel objects | 
| 239 | – | atypesLocal.resize(nLocal_); | 
| 240 | – |  | 
| 241 | – | for (int i = 0; i < nLocal_; i++) | 
| 242 | – | atypesLocal[i] = ff_->getAtomType(idents[i]); | 
| 243 | – |  | 
| 244 | – | groupList_.clear(); | 
| 245 | – | groupList_.resize(nGroups_); | 
| 246 | – | for (int i = 0; i < nGroups_; i++) { | 
| 247 | – | int gid = cgLocalToGlobal[i]; | 
| 248 | – | for (int j = 0; j < nLocal_; j++) { | 
| 249 | – | int aid = AtomLocalToGlobal[j]; | 
| 250 | – | if (globalGroupMembership[aid] == gid) { | 
| 251 | – | groupList_[i].push_back(j); | 
| 244 |  | } | 
| 245 |  | } | 
| 246 |  | } | 
| 247 |  |  | 
| 248 | + | #else | 
| 249 |  | excludesForAtom.clear(); | 
| 250 |  | excludesForAtom.resize(nLocal_); | 
| 251 |  | toposForAtom.clear(); | 
| 278 |  | } | 
| 279 |  | } | 
| 280 |  | } | 
| 281 | < |  | 
| 281 | > | #endif | 
| 282 | > |  | 
| 283 | > | // allocate memory for the parallel objects | 
| 284 | > | atypesLocal.resize(nLocal_); | 
| 285 | > |  | 
| 286 | > | for (int i = 0; i < nLocal_; i++) | 
| 287 | > | atypesLocal[i] = ff_->getAtomType(idents[i]); | 
| 288 | > |  | 
| 289 | > | groupList_.clear(); | 
| 290 | > | groupList_.resize(nGroups_); | 
| 291 | > | for (int i = 0; i < nGroups_; i++) { | 
| 292 | > | int gid = cgLocalToGlobal[i]; | 
| 293 | > | for (int j = 0; j < nLocal_; j++) { | 
| 294 | > | int aid = AtomLocalToGlobal[j]; | 
| 295 | > | if (globalGroupMembership[aid] == gid) { | 
| 296 | > | groupList_[i].push_back(j); | 
| 297 | > | } | 
| 298 | > | } | 
| 299 | > | } | 
| 300 | > |  | 
| 301 | > |  | 
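Reviewer note: the groupList_ construction above is O(nGroups_ × nLocal_) because it rescans every local atom for every group. That is correct as written; if it ever shows up in startup profiles, one pass over the atoms with a global-id-to-local-index map does the same job in near-linear time. A hedged sketch reusing the names from the loop above:

    #include <map>

    // Hypothetical linear-time rebuild of groupList_, assuming the same
    // cgLocalToGlobal, AtomLocalToGlobal, and globalGroupMembership
    // arrays used by the quadratic loop above.
    std::map<int, int> globalToLocalCG;
    for (int i = 0; i < nGroups_; i++)
      globalToLocalCG[cgLocalToGlobal[i]] = i;

    for (int j = 0; j < nLocal_; j++) {
      std::map<int, int>::iterator it =
        globalToLocalCG.find(globalGroupMembership[AtomLocalToGlobal[j]]);
      if (it != globalToLocalCG.end())
        groupList_[it->second].push_back(j);
    }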
| 302 |  | createGtypeCutoffMap(); | 
| 303 |  |  | 
| 304 |  | } | 
| 461 |  | } | 
| 462 |  | } | 
| 463 |  |  | 
| 451 | – |  | 
| 464 |  | groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) { | 
| 465 |  | int i, j; | 
| 466 |  | #ifdef IS_MPI | 
| 535 |  | atomColData.skippedCharge.end(), 0.0); | 
| 536 |  | } | 
| 537 |  |  | 
| 538 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 539 | + | fill(atomRowData.flucQFrc.begin(), | 
| 540 | + | atomRowData.flucQFrc.end(), 0.0); | 
| 541 | + | fill(atomColData.flucQFrc.begin(), | 
| 542 | + | atomColData.flucQFrc.end(), 0.0); | 
| 543 | + | } | 
| 544 | + |  | 
| 545 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 546 | + | fill(atomRowData.electricField.begin(), | 
| 547 | + | atomRowData.electricField.end(), V3Zero); | 
| 548 | + | fill(atomColData.electricField.begin(), | 
| 549 | + | atomColData.electricField.end(), V3Zero); | 
| 550 | + | } | 
| 551 | + |  | 
| 559 |  | #endif | 
| 560 |  | // even in parallel, we need to zero out the local arrays: | 
| 561 |  |  | 
| 568 |  | fill(snap_->atomData.density.begin(), | 
| 569 |  | snap_->atomData.density.end(), 0.0); | 
| 570 |  | } | 
| 571 | + |  | 
| 572 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 573 |  | fill(snap_->atomData.functional.begin(), | 
| 574 |  | snap_->atomData.functional.end(), 0.0); | 
| 575 |  | } | 
| 576 | + |  | 
| 577 |  | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { | 
| 578 |  | fill(snap_->atomData.functionalDerivative.begin(), | 
| 579 |  | snap_->atomData.functionalDerivative.end(), 0.0); | 
| 580 |  | } | 
| 581 | + |  | 
| 582 |  | if (storageLayout_ & DataStorage::dslSkippedCharge) { | 
| 583 |  | fill(snap_->atomData.skippedCharge.begin(), | 
| 584 |  | snap_->atomData.skippedCharge.end(), 0.0); | 
| 585 |  | } | 
| 586 | < |  | 
| 586 | > |  | 
| 587 | > | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 588 | > | fill(snap_->atomData.electricField.begin(), | 
| 589 | > | snap_->atomData.electricField.end(), V3Zero); | 
| 590 | > | } | 
| 591 |  | } | 
| 592 |  |  | 
| 593 |  |  | 
| 610 |  | cgPlanVectorColumn->gather(snap_->cgData.position, | 
| 611 |  | cgColData.position); | 
| 612 |  |  | 
| 613 | + |  | 
| 614 | + |  | 
| 615 | + | if (needVelocities_) { | 
| 616 | + | // gather up the atomic velocities | 
| 617 | + | AtomPlanVectorColumn->gather(snap_->atomData.velocity, | 
| 618 | + | atomColData.velocity); | 
| 619 | + |  | 
| 620 | + | cgPlanVectorColumn->gather(snap_->cgData.velocity, | 
| 621 | + | cgColData.velocity); | 
| 622 | + | } | 
| 623 | + |  | 
| 624 |  |  | 
| 625 |  | // if needed, gather the atomic rotation matrices | 
| 626 |  | if (storageLayout_ & DataStorage::dslAmat) { | 
| 638 |  | atomColData.electroFrame); | 
| 639 |  | } | 
| 640 |  |  | 
| 641 | + | // if needed, gather the atomic fluctuating charge values | 
| 642 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 643 | + | AtomPlanRealRow->gather(snap_->atomData.flucQPos, | 
| 644 | + | atomRowData.flucQPos); | 
| 645 | + | AtomPlanRealColumn->gather(snap_->atomData.flucQPos, | 
| 646 | + | atomColData.flucQPos); | 
| 647 | + | } | 
| 648 | + |  | 
| 649 |  | #endif | 
| 650 |  | } | 
| 651 |  |  | 
| 668 |  | for (int i = 0; i < n; i++) | 
| 669 |  | snap_->atomData.density[i] += rho_tmp[i]; | 
| 670 |  | } | 
| 671 | + |  | 
| 672 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 673 | + |  | 
| 674 | + | AtomPlanVectorRow->scatter(atomRowData.electricField, | 
| 675 | + | snap_->atomData.electricField); | 
| 676 | + |  | 
| 677 | + | int n = snap_->atomData.electricField.size(); | 
| 678 | + | vector<Vector3d> field_tmp(n, V3Zero); | 
| 679 | + | AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp); | 
| 680 | + | for (int i = 0; i < n; i++) | 
| 681 | + | snap_->atomData.electricField[i] += field_tmp[i]; | 
| 682 | + | } | 
| 683 |  | #endif | 
| 684 |  | } | 
| 685 |  |  | 
| 754 |  | } | 
| 755 |  |  | 
| 756 |  | AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp); | 
| 757 | < | for (int i = 0; i < ns; i++) | 
| 757 | > | for (int i = 0; i < ns; i++) | 
| 758 |  | snap_->atomData.skippedCharge[i] += skch_tmp[i]; | 
| 759 | + |  | 
| 760 |  | } | 
| 761 |  |  | 
| 762 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 763 | + |  | 
| 764 | + | int nq = snap_->atomData.flucQFrc.size(); | 
| 765 | + | vector<RealType> fqfrc_tmp(nq, 0.0); | 
| 766 | + |  | 
| 767 | + | AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp); | 
| 768 | + | for (int i = 0; i < nq; i++) { | 
| 769 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 770 | + | fqfrc_tmp[i] = 0.0; | 
| 771 | + | } | 
| 772 | + |  | 
| 773 | + | AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp); | 
| 774 | + | for (int i = 0; i < nq; i++) | 
| 775 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 776 | + |  | 
| 777 | + | } | 
| 778 | + |  | 
| 779 |  | nLocal_ = snap_->getNumberOfAtoms(); | 
| 780 |  |  | 
| 781 |  | vector<potVec> pot_temp(nLocal_, | 
| 787 |  |  | 
| 788 |  | for (int ii = 0;  ii < pot_temp.size(); ii++ ) | 
| 789 |  | pairwisePot += pot_temp[ii]; | 
| 790 | < |  | 
| 790 | > |  | 
| 791 | > | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 792 | > | // This is the pairwise contribution to the particle pot.  The | 
| 793 | > | // embedding contribution is added in each of the low level | 
| 794 | > | // non-bonded routines.  On a single processor, this is done in | 
| 795 | > | // unpackInteractionData, not in collectData. | 
| 796 | > | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 797 | > | for (int i = 0; i < nLocal_; i++) { | 
| 798 | > | // the factor of two is needed because the total potential terms | 
| 799 | > | // are divided by 2 in parallel due to the row/column scatter | 
| 800 | > | snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); | 
| 801 | > | } | 
| 802 | > | } | 
| 803 | > | } | 
| 804 | > |  | 
| 805 |  | fill(pot_temp.begin(), pot_temp.end(), | 
| 806 |  | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 807 |  |  | 
| 809 |  |  | 
| 810 |  | for (int ii = 0;  ii < pot_temp.size(); ii++ ) | 
| 811 |  | pairwisePot += pot_temp[ii]; | 
| 812 | + |  | 
| 813 | + | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 814 | + | // This is the pairwise contribution to the particle pot.  The | 
| 815 | + | // embedding contribution is added in each of the low level | 
| 816 | + | // non-bonded routines.  On a single processor, this is done in | 
| 817 | + | // unpackInteractionData, not in collectData. | 
| 818 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 819 | + | for (int i = 0; i < nLocal_; i++) { | 
| 820 | + | // the factor of two is needed because the total potential terms | 
| 821 | + | // are divided by 2 in parallel due to the row/column scatter | 
| 822 | + | snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); | 
| 823 | + | } | 
| 824 | + | } | 
| 825 | + | } | 
| 826 |  |  | 
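Reviewer note on the 2.0 factor in the two blocks above: unpackInteractionData deposits half of each pair potential on the row atom and half on the column atom, so after the row and column scatters a local atom holds only half of every pair energy it participated in; doubling restores the serial convention in which each atom is credited with the full pair energy. A toy check of that bookkeeping with placeholder numbers:

    #include <cassert>

    int main() {
      double v = 1.0;               // one pair's potential energy
      double rowShare = 0.5 * v;    // deposited on the row atom
      double colShare = 0.5 * v;    // deposited on the column atom

      // after the scatters each atom of the pair sees only its half;
      // the factor of 2.0 recovers the full pair energy per atom
      assert(2.0 * rowShare == v);
      assert(2.0 * colShare == v);
      return 0;
    }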
| 827 | + | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 828 | + | int npp = snap_->atomData.particlePot.size(); | 
| 829 | + | vector<RealType> ppot_temp(npp, 0.0); | 
| 830 | + |  | 
| 831 | + | // This is the direct or embedding contribution to the particle | 
| 832 | + | // pot. | 
| 833 | + |  | 
| 834 | + | AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp); | 
| 835 | + | for (int i = 0; i < npp; i++) { | 
| 836 | + | snap_->atomData.particlePot[i] += ppot_temp[i]; | 
| 837 | + | } | 
| 838 | + |  | 
| 839 | + | fill(ppot_temp.begin(), ppot_temp.end(), 0.0); | 
| 840 | + |  | 
| 841 | + | AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp); | 
| 842 | + | for (int i = 0; i < npp; i++) { | 
| 843 | + | snap_->atomData.particlePot[i] += ppot_temp[i]; | 
| 844 | + | } | 
| 845 | + | } | 
| 846 | + |  | 
| 847 |  | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 848 |  | RealType ploc1 = pairwisePot[ii]; | 
| 849 |  | RealType ploc2 = 0.0; | 
| 851 |  | pairwisePot[ii] = ploc2; | 
| 852 |  | } | 
| 853 |  |  | 
| 854 | + | // Here be dragons: reduce the conductive heat flux over the column. | 
| 855 | + | MPI::Intracomm col = colComm.getComm(); | 
| 856 | + |  | 
| 857 | + | col.Allreduce(MPI::IN_PLACE, | 
| 858 | + | &snap_->frameData.conductiveHeatFlux[0], 3, | 
| 859 | + | MPI::REALTYPE, MPI::SUM); | 
| 860 | + |  | 
| 861 | + |  | 
| 862 |  | #endif | 
| 863 |  |  | 
| 864 |  | } | 
| 865 |  |  | 
| 866 | + | /** | 
| 867 | + | * Collects information obtained during the post-pair (and embedding | 
| 868 | + | * functional) loops into local data structures. | 
| 869 | + | */ | 
| 870 | + | void ForceMatrixDecomposition::collectSelfData() { | 
| 871 | + | snap_ = sman_->getCurrentSnapshot(); | 
| 872 | + | storageLayout_ = sman_->getStorageLayout(); | 
| 873 | + |  | 
| 874 | + | #ifdef IS_MPI | 
| 875 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 876 | + | RealType ploc1 = embeddingPot[ii]; | 
| 877 | + | RealType ploc2 = 0.0; | 
| 878 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 879 | + | embeddingPot[ii] = ploc2; | 
| 880 | + | } | 
| 881 | + | #endif | 
| 882 | + |  | 
| 883 | + | } | 
| 884 | + |  | 
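Reviewer note: both collectData and collectSelfData reduce the family potentials one element at a time. If the values are staged in a contiguous buffer, a single in-place reduction covers every family at once; a hedged sketch is below (the MPI C++ bindings are used only to match this file; they were deprecated in MPI-2.2):

    // Hypothetical single-call alternative to the per-family loop,
    // assuming embeddingPot, RealType, N_INTERACTION_FAMILIES, and
    // MPI::REALTYPE as used elsewhere in this file.
    RealType fam[N_INTERACTION_FAMILIES];
    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      fam[ii] = embeddingPot[ii];

    MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, fam,
                              N_INTERACTION_FAMILIES,
                              MPI::REALTYPE, MPI::SUM);

    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
      embeddingPot[ii] = fam[ii];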
| 885 | + |  | 
| 886 | + |  | 
| 887 |  | int ForceMatrixDecomposition::getNAtomsInRow() { | 
| 888 |  | #ifdef IS_MPI | 
| 889 |  | return nAtomsInRow_; | 
| 924 |  | return d; | 
| 925 |  | } | 
| 926 |  |  | 
| 927 | + | Vector3d ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){ | 
| 928 | + | #ifdef IS_MPI | 
| 929 | + | return cgColData.velocity[cg2]; | 
| 930 | + | #else | 
| 931 | + | return snap_->cgData.velocity[cg2]; | 
| 932 | + | #endif | 
| 933 | + | } | 
| 934 |  |  | 
| 935 | + | Vector3d ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){ | 
| 936 | + | #ifdef IS_MPI | 
| 937 | + | return atomColData.velocity[atom2]; | 
| 938 | + | #else | 
| 939 | + | return snap_->atomData.velocity[atom2]; | 
| 940 | + | #endif | 
| 941 | + | } | 
| 942 | + |  | 
| 943 | + |  | 
| 944 |  | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){ | 
| 945 |  |  | 
| 946 |  | Vector3d d; | 
| 1006 |  | * We need to exclude some overcounted interactions that result from | 
| 1007 |  | * the parallel decomposition. | 
| 1008 |  | */ | 
| 1009 | < | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) { | 
| 1010 | < | int unique_id_1, unique_id_2; | 
| 1011 | < |  | 
| 1009 | > | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) { | 
| 1010 | > | int unique_id_1, unique_id_2, group1, group2; | 
| 1011 | > |  | 
| 1012 |  | #ifdef IS_MPI | 
| 1013 |  | // in MPI, we have to look up the unique IDs for each atom | 
| 1014 |  | unique_id_1 = AtomRowToGlobal[atom1]; | 
| 1015 |  | unique_id_2 = AtomColToGlobal[atom2]; | 
| 1016 | + | group1 = cgRowToGlobal[cg1]; | 
| 1017 | + | group2 = cgColToGlobal[cg2]; | 
| 1018 | + | #else | 
| 1019 | + | unique_id_1 = AtomLocalToGlobal[atom1]; | 
| 1020 | + | unique_id_2 = AtomLocalToGlobal[atom2]; | 
| 1021 | + | group1 = cgLocalToGlobal[cg1]; | 
| 1022 | + | group2 = cgLocalToGlobal[cg2]; | 
| 1023 | + | #endif | 
| 1024 |  |  | 
| 835 | – | // this situation should only arise in MPI simulations | 
| 1025 |  | if (unique_id_1 == unique_id_2) return true; | 
| 1026 | < |  | 
| 1026 | > |  | 
| 1027 | > | #ifdef IS_MPI | 
| 1028 |  | // this prevents us from doing the pair on multiple processors | 
| 1029 |  | if (unique_id_1 < unique_id_2) { | 
| 1030 |  | if ((unique_id_1 + unique_id_2) % 2 == 0) return true; | 
| 1031 |  | } else { | 
| 1032 | < | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 1032 | > | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 1033 |  | } | 
| 1034 | + | #endif | 
| 1035 | + |  | 
| 1036 | + | #ifndef IS_MPI | 
| 1037 | + | if (group1 == group2) { | 
| 1038 | + | if (unique_id_1 < unique_id_2) return true; | 
| 1039 | + | } | 
| 1040 |  | #endif | 
| 1041 | + |  | 
| 1042 |  | return false; | 
| 1043 |  | } | 
| 1044 |  |  | 
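Reviewer note: the parity rule in the MPI branch above is what keeps the row/column decomposition from double-counting. A pair of distinct global ids is visited once as (row = i, col = j) and once as (row = j, col = i), and the two orientations always reach opposite skip decisions, so exactly one processor computes the pair. A standalone check of that invariant:

    #include <cassert>

    // Mirrors the MPI branch of skipAtomPair() above.
    static bool skips(int id1, int id2) {
      if (id1 == id2) return true;
      if (id1 < id2) return (id1 + id2) % 2 == 0;
      return (id1 + id2) % 2 == 1;
    }

    int main() {
      for (int i = 0; i < 16; i++)
        for (int j = 0; j < 16; j++)
          if (i != j)
            assert(skips(i, j) != skips(j, i));  // exactly one side skips
      return 0;
    }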
| 1052 |  | * field) must still be handled for these pairs. | 
| 1053 |  | */ | 
| 1054 |  | bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) { | 
| 1055 | < | int unique_id_2; | 
| 1056 | < | #ifdef IS_MPI | 
| 1057 | < | // in MPI, we have to look up the unique IDs for the row atom. | 
| 861 | < | unique_id_2 = AtomColToGlobal[atom2]; | 
| 862 | < | #else | 
| 863 | < | // in the normal loop, the atom numbers are unique | 
| 864 | < | unique_id_2 = atom2; | 
| 865 | < | #endif | 
| 1055 | > |  | 
| 1056 | > | // excludesForAtom was constructed to use row/column indices in the MPI | 
| 1057 | > | // version, and to use local IDs in the non-MPI version: | 
| 1058 |  |  | 
| 1059 |  | for (vector<int>::iterator i = excludesForAtom[atom1].begin(); | 
| 1060 |  | i != excludesForAtom[atom1].end(); ++i) { | 
| 1061 | < | if ( (*i) == unique_id_2 ) return true; | 
| 1061 | > | if ( (*i) == atom2 ) return true; | 
| 1062 |  | } | 
| 1063 |  |  | 
| 1064 |  | return false; | 
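Reviewer note: the loop above is a linear membership test. If it reads better, std::find expresses the same thing in one statement; a behavior-equivalent sketch of the excludeAtomPair() body:

    #include <algorithm>

    // Equivalent membership test; excludesForAtom, atom1, and atom2 as
    // in the function above.
    return std::find(excludesForAtom[atom1].begin(),
                     excludesForAtom[atom1].end(),
                     atom2) != excludesForAtom[atom1].end();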
| 1132 |  | idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]); | 
| 1133 |  | } | 
| 1134 |  |  | 
| 1135 | < | #else | 
| 1135 | > | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 1136 | > | idat.flucQ1 = &(atomRowData.flucQPos[atom1]); | 
| 1137 | > | idat.flucQ2 = &(atomColData.flucQPos[atom2]); | 
| 1138 | > | } | 
| 1139 |  |  | 
| 1140 | + | #else | 
| 1141 | + |  | 
| 1142 |  | idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]); | 
| 946 | – | //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]), | 
| 947 | – | //                         ff_->getAtomType(idents[atom2]) ); | 
| 1143 |  |  | 
| 1144 |  | if (storageLayout_ & DataStorage::dslAmat) { | 
| 1145 |  | idat.A1 = &(snap_->atomData.aMat[atom1]); | 
| 1180 |  | idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]); | 
| 1181 |  | idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]); | 
| 1182 |  | } | 
| 1183 | + |  | 
| 1184 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 1185 | + | idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]); | 
| 1186 | + | idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]); | 
| 1187 | + | } | 
| 1188 | + |  | 
| 1189 |  | #endif | 
| 1190 |  | } | 
| 1191 |  |  | 
| 1192 |  |  | 
| 1193 |  | void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) { | 
| 1194 |  | #ifdef IS_MPI | 
| 1195 | < | pot_row[atom1] += 0.5 *  *(idat.pot); | 
| 1196 | < | pot_col[atom2] += 0.5 *  *(idat.pot); | 
| 1195 | > | pot_row[atom1] += RealType(0.5) *  *(idat.pot); | 
| 1196 | > | pot_col[atom2] += RealType(0.5) *  *(idat.pot); | 
| 1197 |  |  | 
| 1198 |  | atomRowData.force[atom1] += *(idat.f1); | 
| 1199 |  | atomColData.force[atom2] -= *(idat.f1); | 
| 1200 | + |  | 
| 1201 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 1202 | + | atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1); | 
| 1203 | + | atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2); | 
| 1204 | + | } | 
| 1205 | + |  | 
| 1206 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 1207 | + | atomRowData.electricField[atom1] += *(idat.eField1); | 
| 1208 | + | atomColData.electricField[atom2] += *(idat.eField2); | 
| 1209 | + | } | 
| 1210 | + |  | 
| 1211 |  | #else | 
| 1212 |  | pairwisePot += *(idat.pot); | 
| 1213 |  |  | 
| 1214 |  | snap_->atomData.force[atom1] += *(idat.f1); | 
| 1215 |  | snap_->atomData.force[atom2] -= *(idat.f1); | 
| 1216 | + |  | 
| 1217 | + | if (idat.doParticlePot) { | 
| 1218 | + | // This is the pairwise contribution to the particle pot.  The | 
| 1219 | + | // embedding contribution is added in each of the low level | 
| 1220 | + | // non-bonded routines.  In parallel, this calculation is done | 
| 1221 | + | // in collectData, not in unpackInteractionData. | 
| 1222 | + | snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw); | 
| 1223 | + | snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw); | 
| 1224 | + | } | 
| 1225 | + |  | 
| 1226 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 1227 | + | snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1); | 
| 1228 | + | snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2); | 
| 1229 | + | } | 
| 1230 | + |  | 
| 1231 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 1232 | + | snap_->atomData.electricField[atom1] += *(idat.eField1); | 
| 1233 | + | snap_->atomData.electricField[atom2] += *(idat.eField2); | 
| 1234 | + | } | 
| 1235 | + |  | 
| 1236 |  | #endif | 
| 1237 |  |  | 
| 1238 |  | } | 
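Reviewer note on the sign conventions in unpackInteractionData: the pair force f1 enters with opposite signs on the two atoms (Newton's third law), while dVdFQ1/eField1 and dVdFQ2/eField2 are per-atom quantities, so each atom accumulates only its own term; the flucQ terms carry a minus sign because the generalized force on a charge coordinate is -dV/dq. A toy check that the force bookkeeping conserves momentum:

    #include <cassert>

    int main() {
      double f1[3]  = { 0.1, -0.2, 0.3 };  // placeholder pair force
      double net[3] = { 0.0, 0.0, 0.0 };

      for (int k = 0; k < 3; k++) {
        net[k] += f1[k];                   // force[atom1] += f1
        net[k] -= f1[k];                   // force[atom2] -= f1
      }
      for (int k = 0; k < 3; k++)
        assert(net[k] == 0.0);             // zero net force on the pair
      return 0;
    }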
| 1417 |  | } | 
| 1418 |  | } | 
| 1419 |  | #else | 
| 1188 | – |  | 
| 1420 |  | for (vector<int>::iterator j1 = cellList_[m1].begin(); | 
| 1421 |  | j1 != cellList_[m1].end(); ++j1) { | 
| 1422 |  | for (vector<int>::iterator j2 = cellList_[m2].begin(); | 
| 1423 |  | j2 != cellList_[m2].end(); ++j2) { | 
| 1424 | < |  | 
| 1424 | > |  | 
| 1425 |  | // Always do this if we're in different cells or if | 
| 1426 | < | // we're in the same cell and the global index of the | 
| 1427 | < | // j2 cutoff group is less than the j1 cutoff group | 
| 1428 | < |  | 
| 1429 | < | if (m2 != m1 || (*j2) < (*j1)) { | 
| 1426 | > | // we're in the same cell and the global index of | 
| 1427 | > | // the j2 cutoff group is greater than or equal to | 
| 1428 | > | // the j1 cutoff group.  Note that Rappaport's code | 
| 1429 | > | // has a "less than" conditional here, but that | 
| 1430 | > | // deals with atom-by-atom computation.  OpenMD | 
| 1431 | > | // allows atoms within a single cutoff group to | 
| 1432 | > | // interact with each other. | 
| 1433 | > |  | 
| 1434 | > |  | 
| 1435 | > |  | 
| 1436 | > | if (m2 != m1 || (*j2) >= (*j1) ) { | 
| 1437 | > |  | 
| 1438 |  | dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)]; | 
| 1439 |  | snap_->wrapVector(dr); | 
| 1440 |  | cuts = getGroupCutoffs( (*j1), (*j2) ); | 
| 1453 |  | // branch to do all cutoff group pairs | 
| 1454 |  | #ifdef IS_MPI | 
| 1455 |  | for (int j1 = 0; j1 < nGroupsInRow_; j1++) { | 
| 1456 | < | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1456 | > | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1457 |  | dr = cgColData.position[j2] - cgRowData.position[j1]; | 
| 1458 |  | snap_->wrapVector(dr); | 
| 1459 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1461 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1462 |  | } | 
| 1463 |  | } | 
| 1464 | < | } | 
| 1464 | > | } | 
| 1465 |  | #else | 
| 1466 | < | for (int j1 = 0; j1 < nGroups_ - 1; j1++) { | 
| 1467 | < | for (int j2 = j1 + 1; j2 < nGroups_; j2++) { | 
| 1466 | > | // include all groups here. | 
| 1467 | > | for (int j1 = 0; j1 < nGroups_; j1++) { | 
| 1468 | > | // include self-group interactions (j2 == j1) | 
| 1469 | > | for (int j2 = j1; j2 < nGroups_; j2++) { | 
| 1470 |  | dr = snap_->cgData.position[j2] - snap_->cgData.position[j1]; | 
| 1471 |  | snap_->wrapVector(dr); | 
| 1472 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1473 |  | if (dr.lengthSquare() < cuts.third) { | 
| 1474 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1475 |  | } | 
| 1476 | < | } | 
| 1477 | < | } | 
| 1476 | > | } | 
| 1477 | > | } | 
| 1478 |  | #endif | 
| 1479 |  | } | 
| 1480 |  |  |
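Reviewer note: switching the serial loops to (*j2) >= (*j1) in the cell-list branch and j2 = j1 in the all-pairs branch adds the self pairs (j1, j1) to the neighbor list. Those self pairs carry the intra-group atom interactions, and the new group1 == group2 branch of skipAtomPair() keeps each intra-group atom pair from being computed twice. The enumeration for three groups, as a quick illustration:

    #include <cstdio>

    int main() {
      const int nGroups = 3;  // stand-in for nGroups_
      for (int j1 = 0; j1 < nGroups; j1++)
        for (int j2 = j1; j2 < nGroups; j2++)
          std::printf("(%d,%d) ", j1, j2);
      std::printf("\n");      // prints: (0,0) (0,1) (0,2) (1,1) (1,2) (2,2)
      return 0;
    }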