| 36 |  | * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). | 
| 37 |  | * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). | 
| 38 |  | * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008). | 
| 39 | < | * [4]  Vardeman & Gezelter, in progress (2009). | 
| 39 | > | * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010). | 
| 40 | > | * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). | 
| 41 |  | */ | 
| 42 |  | #include "parallel/ForceMatrixDecomposition.hpp" | 
| 43 |  | #include "math/SquareMatrix3.hpp" | 
| 234 |  | } | 
| 235 |  | } | 
| 236 |  |  | 
| 237 | < | #endif | 
| 237 | < |  | 
| 238 | < | // allocate memory for the parallel objects | 
| 239 | < | atypesLocal.resize(nLocal_); | 
| 240 | < |  | 
| 241 | < | for (int i = 0; i < nLocal_; i++) | 
| 242 | < | atypesLocal[i] = ff_->getAtomType(idents[i]); | 
| 243 | < |  | 
| 244 | < | groupList_.clear(); | 
| 245 | < | groupList_.resize(nGroups_); | 
| 246 | < | for (int i = 0; i < nGroups_; i++) { | 
| 247 | < | int gid = cgLocalToGlobal[i]; | 
| 248 | < | for (int j = 0; j < nLocal_; j++) { | 
| 249 | < | int aid = AtomLocalToGlobal[j]; | 
| 250 | < | if (globalGroupMembership[aid] == gid) { | 
| 251 | < | groupList_[i].push_back(j); | 
| 252 | < | } | 
| 253 | < | } | 
| 254 | < | } | 
| 255 | < |  | 
| 237 | > | #else | 
| 238 |  | excludesForAtom.clear(); | 
| 239 |  | excludesForAtom.resize(nLocal_); | 
| 240 |  | toposForAtom.clear(); | 
| 267 |  | } | 
| 268 |  | } | 
| 269 |  | } | 
| 270 | < |  | 
| 270 | > | #endif | 
| 271 | > |  | 
| 272 | > | // allocate memory for the parallel objects | 
| 273 | > | atypesLocal.resize(nLocal_); | 
| 274 | > |  | 
| 275 | > | for (int i = 0; i < nLocal_; i++) | 
| 276 | > | atypesLocal[i] = ff_->getAtomType(idents[i]); | 
| 277 | > |  | 
| 278 | > | groupList_.clear(); | 
| 279 | > | groupList_.resize(nGroups_); | 
| 280 | > | for (int i = 0; i < nGroups_; i++) { | 
| 281 | > | int gid = cgLocalToGlobal[i]; | 
| 282 | > | for (int j = 0; j < nLocal_; j++) { | 
| 283 | > | int aid = AtomLocalToGlobal[j]; | 
| 284 | > | if (globalGroupMembership[aid] == gid) { | 
| 285 | > | groupList_[i].push_back(j); | 
| 286 | > | } | 
| 287 | > | } | 
| 288 | > | } | 
| 289 | > |  | 
| 290 | > |  | 
| 291 |  | createGtypeCutoffMap(); | 
| 292 |  |  | 
| 293 |  | } | 
| 523 |  | atomRowData.skippedCharge.end(), 0.0); | 
| 524 |  | fill(atomColData.skippedCharge.begin(), | 
| 525 |  | atomColData.skippedCharge.end(), 0.0); | 
| 526 | + | } | 
| 527 | + |  | 
| 528 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 529 | + | fill(atomRowData.electricField.begin(), | 
| 530 | + | atomRowData.electricField.end(), V3Zero); | 
| 531 | + | fill(atomColData.electricField.begin(), | 
| 532 | + | atomColData.electricField.end(), V3Zero); | 
| 533 |  | } | 
| 534 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 535 | + | fill(atomRowData.flucQFrc.begin(), atomRowData.flucQFrc.end(), | 
| 536 | + | 0.0); | 
| 537 | + | fill(atomColData.flucQFrc.begin(), atomColData.flucQFrc.end(), | 
| 538 | + | 0.0); | 
| 539 | + | } | 
| 540 |  |  | 
| 541 |  | #endif | 
| 542 |  | // even in parallel, we need to zero out the local arrays: | 
| 550 |  | fill(snap_->atomData.density.begin(), | 
| 551 |  | snap_->atomData.density.end(), 0.0); | 
| 552 |  | } | 
| 553 | + |  | 
| 554 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 555 |  | fill(snap_->atomData.functional.begin(), | 
| 556 |  | snap_->atomData.functional.end(), 0.0); | 
| 557 |  | } | 
| 558 | + |  | 
| 559 |  | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { | 
| 560 |  | fill(snap_->atomData.functionalDerivative.begin(), | 
| 561 |  | snap_->atomData.functionalDerivative.end(), 0.0); | 
| 562 |  | } | 
| 563 | + |  | 
| 564 |  | if (storageLayout_ & DataStorage::dslSkippedCharge) { | 
| 565 |  | fill(snap_->atomData.skippedCharge.begin(), | 
| 566 |  | snap_->atomData.skippedCharge.end(), 0.0); | 
| 567 |  | } | 
| 568 | < |  | 
| 568 | > |  | 
| 569 | > | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 570 | > | fill(snap_->atomData.electricField.begin(), | 
| 571 | > | snap_->atomData.electricField.end(), V3Zero); | 
| 572 | > | } | 
| 573 |  | } | 
| 574 |  |  | 
| 575 |  |  | 
| 609 |  | atomColData.electroFrame); | 
| 610 |  | } | 
| 611 |  |  | 
| 612 | + | // if needed, gather the atomic fluctuating charge values | 
| 613 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 614 | + | AtomPlanRealRow->gather(snap_->atomData.flucQPos, | 
| 615 | + | atomRowData.flucQPos); | 
| 616 | + | AtomPlanRealColumn->gather(snap_->atomData.flucQPos, | 
| 617 | + | atomColData.flucQPos); | 
| 618 | + | } | 
| 619 | + |  | 
| 620 |  | #endif | 
| 621 |  | } | 
| 622 |  |  | 
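The flucQPos gather added here follows the same plan-based pattern already used for position and electroFrame: before any pairs are evaluated, every slot of the row block and of the column block is filled with the current value of the atom that owns that slot. A serial sketch of what one of those gathers means, with a plain index map standing in for the MPI communication that the real AtomPlan objects perform (globalValues and blockToGlobal are illustrative names, not OpenMD members):

    #include <cstddef>
    #include <vector>

    // Serial stand-in for AtomPlan*Row->gather() / AtomPlan*Column->gather():
    // after the call, block[i] holds the value of atom blockToGlobal[i].
    template <typename T>
    void gatherIntoBlock(const std::vector<T>& globalValues,
                         const std::vector<int>& blockToGlobal,
                         std::vector<T>& block) {
      block.resize(blockToGlobal.size());
      for (std::size_t i = 0; i < blockToGlobal.size(); ++i)
        block[i] = globalValues[blockToGlobal[i]];
    }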
| 639 |  | for (int i = 0; i < n; i++) | 
| 640 |  | snap_->atomData.density[i] += rho_tmp[i]; | 
| 641 |  | } | 
| 642 | + |  | 
| 643 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 644 | + |  | 
| 645 | + | AtomPlanVectorRow->scatter(atomRowData.electricField, | 
| 646 | + | snap_->atomData.electricField); | 
| 647 | + |  | 
| 648 | + | int n = snap_->atomData.electricField.size(); | 
| 649 | + | vector<Vector3d> field_tmp(n, V3Zero); | 
| 650 | + | AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp); | 
| 651 | + | for (int i = 0; i < n; i++) | 
| 652 | + | snap_->atomData.electricField[i] += field_tmp[i]; | 
| 653 | + | } | 
| 654 |  | #endif | 
| 655 |  | } | 
| 656 |  |  | 
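The electricField blocks are collected with the same two-step scatter already used for density: the row-block contributions are scattered straight onto the local array, while the column-block contributions go into a temporary that is then summed in, so every local atom picks up both halves of the force matrix exactly once. A serial sketch of the scatter-and-accumulate step (blockToLocal is an illustrative index map, not an OpenMD member, and the MPI communication is omitted):

    #include <cstddef>
    #include <vector>

    // Serial stand-in for a scatter followed by element-wise accumulation:
    // fold one block's per-atom contributions back onto the local atoms.
    template <typename T>
    void scatterAndAccumulate(const std::vector<T>& block,
                              const std::vector<int>& blockToLocal,
                              std::vector<T>& local) {
      for (std::size_t i = 0; i < block.size(); ++i)
        local[blockToLocal[i]] += block[i];
    }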
| 725 |  | } | 
| 726 |  |  | 
| 727 |  | AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp); | 
| 728 | < | for (int i = 0; i < ns; i++) | 
| 728 | > | for (int i = 0; i < ns; i++) | 
| 729 |  | snap_->atomData.skippedCharge[i] += skch_tmp[i]; | 
| 730 | + |  | 
| 731 |  | } | 
| 732 |  |  | 
| 733 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 734 | + |  | 
| 735 | + | int nq = snap_->atomData.flucQFrc.size(); | 
| 736 | + | vector<RealType> fqfrc_tmp(nq, 0.0); | 
| 737 | + |  | 
| 738 | + | AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp); | 
| 739 | + | for (int i = 0; i < nq; i++) { | 
| 740 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 741 | + | fqfrc_tmp[i] = 0.0; | 
| 742 | + | } | 
| 743 | + |  | 
| 744 | + | AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp); | 
| 745 | + | for (int i = 0; i < nq; i++) | 
| 746 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 747 | + |  | 
| 748 | + | } | 
| 749 | + |  | 
| 750 |  | nLocal_ = snap_->getNumberOfAtoms(); | 
| 751 |  |  | 
| 752 |  | vector<potVec> pot_temp(nLocal_, | 
| 774 |  | pairwisePot[ii] = ploc2; | 
| 775 |  | } | 
| 776 |  |  | 
| 777 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 778 | + | RealType ploc1 = embeddingPot[ii]; | 
| 779 | + | RealType ploc2 = 0.0; | 
| 780 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 781 | + | embeddingPot[ii] = ploc2; | 
| 782 | + | } | 
| 783 | + |  | 
| 784 |  | #endif | 
| 785 |  |  | 
| 786 |  | } | 
| 893 |  | */ | 
| 894 |  | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) { | 
| 895 |  | int unique_id_1, unique_id_2; | 
| 896 | < |  | 
| 896 | > |  | 
| 897 |  | #ifdef IS_MPI | 
| 898 |  | // in MPI, we have to look up the unique IDs for each atom | 
| 899 |  | unique_id_1 = AtomRowToGlobal[atom1]; | 
| 900 |  | unique_id_2 = AtomColToGlobal[atom2]; | 
| 901 | + | #else | 
| 902 | + | unique_id_1 = AtomLocalToGlobal[atom1]; | 
| 903 | + | unique_id_2 = AtomLocalToGlobal[atom2]; | 
| 904 | + | #endif | 
| 905 |  |  | 
| 835 | – | // this situation should only arise in MPI simulations | 
| 906 |  | if (unique_id_1 == unique_id_2) return true; | 
| 907 | < |  | 
| 907 | > |  | 
| 908 | > | #ifdef IS_MPI | 
| 909 |  | // this prevents us from doing the pair on multiple processors | 
| 910 |  | if (unique_id_1 < unique_id_2) { | 
| 911 |  | if ((unique_id_1 + unique_id_2) % 2 == 0) return true; | 
| 912 |  | } else { | 
| 913 | < | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 913 | > | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 914 |  | } | 
| 915 |  | #endif | 
| 916 | + |  | 
| 917 |  | return false; | 
| 918 |  | } | 
| 919 |  |  | 
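The parity test in skipAtomPair is what keeps each interacting pair on a single processor: in the row/column decomposition a pair of distinct atoms appears twice, once in (row, column) order and once transposed, and accepting it only when the sum of the global IDs has the right parity for that ordering selects exactly one of the two appearances. A small standalone check of that property (hypothetical helper, not OpenMD code):

    #include <cassert>

    // The same parity rule as skipAtomPair, phrased as "keep" rather than
    // "skip": the pair (id1, id2) is computed here only if this returns true.
    bool keepPair(int id1, int id2) {
      if (id1 == id2) return false;              // never compute self pairs
      if (id1 < id2)  return (id1 + id2) % 2 != 0;
      return (id1 + id2) % 2 != 1;
    }

    int main() {
      // Exactly one ordering of every distinct pair survives the test.
      for (int i = 0; i < 100; ++i)
        for (int j = 0; j < 100; ++j)
          if (i != j) assert(keepPair(i, j) != keepPair(j, i));
      return 0;
    }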
| 927 |  | * field) must still be handled for these pairs. | 
| 928 |  | */ | 
| 929 |  | bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) { | 
| 930 | < | int unique_id_2; | 
| 931 | < | #ifdef IS_MPI | 
| 932 | < | // in MPI, we have to look up the unique IDs for the row atom. | 
| 861 | < | unique_id_2 = AtomColToGlobal[atom2]; | 
| 862 | < | #else | 
| 863 | < | // in the normal loop, the atom numbers are unique | 
| 864 | < | unique_id_2 = atom2; | 
| 865 | < | #endif | 
| 930 | > |  | 
| 931 | > | // excludesForAtom was constructed to use row/column indices in the MPI | 
| 932 | > | // version, and to use local IDs in the non-MPI version: | 
| 933 |  |  | 
| 934 |  | for (vector<int>::iterator i = excludesForAtom[atom1].begin(); | 
| 935 |  | i != excludesForAtom[atom1].end(); ++i) { | 
| 936 | < | if ( (*i) == unique_id_2 ) return true; | 
| 936 | > | if ( (*i) == atom2 ) return true; | 
| 937 |  | } | 
| 938 |  |  | 
| 939 |  | return false; | 
| 1008 |  | } | 
| 1009 |  |  | 
| 1010 |  | #else | 
| 1011 | + |  | 
| 1012 |  |  | 
| 1013 | + | // cerr << "atoms = " << atom1 << " " << atom2 << "\n"; | 
| 1014 | + | // cerr << "pos1 = " << snap_->atomData.position[atom1] << "\n"; | 
| 1015 | + | // cerr << "pos2 = " << snap_->atomData.position[atom2] << "\n"; | 
| 1016 | + |  | 
| 1017 |  | idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]); | 
| 1018 |  | //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]), | 
| 1019 |  | //                         ff_->getAtomType(idents[atom2]) ); | 
| 1063 |  |  | 
| 1064 |  | void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) { | 
| 1065 |  | #ifdef IS_MPI | 
| 1066 | < | pot_row[atom1] += 0.5 *  *(idat.pot); | 
| 1067 | < | pot_col[atom2] += 0.5 *  *(idat.pot); | 
| 1066 | > | pot_row[atom1] += RealType(0.5) *  *(idat.pot); | 
| 1067 | > | pot_col[atom2] += RealType(0.5) *  *(idat.pot); | 
| 1068 |  |  | 
| 1069 |  | atomRowData.force[atom1] += *(idat.f1); | 
| 1070 |  | atomColData.force[atom2] -= *(idat.f1); | 
| 1071 | + |  | 
| 1072 | + | // should particle pot be done here also? | 
| 1073 |  | #else | 
| 1074 |  | pairwisePot += *(idat.pot); | 
| 1075 |  |  | 
| 1076 |  | snap_->atomData.force[atom1] += *(idat.f1); | 
| 1077 |  | snap_->atomData.force[atom2] -= *(idat.f1); | 
| 1078 | + |  | 
| 1079 | + | if (idat.doParticlePot) { | 
| 1080 | + | snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw); | 
| 1081 | + | snap_->atomData.particlePot[atom2] -= *(idat.vpair) * *(idat.sw); | 
| 1082 | + | } | 
| 1083 | + |  | 
| 1084 |  | #endif | 
| 1085 |  |  | 
| 1086 |  | } | 
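Adding half of the pair potential to pot_row[atom1] and half to pot_col[atom2] means that when the row and column blocks are later reduced back onto the locally owned atoms (in collectData above), each pair contributes its full energy exactly once; the RealType(0.5) cast presumably keeps the scalar literal in the same floating-point type as the potential vector so the templated vector-scalar product resolves cleanly. A toy bookkeeping check of the half-and-half convention (plain doubles instead of OpenMD's potVec):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      const double pairEnergy = 2.5;   // energy of a single interacting pair
      std::vector<double> potRow(4, 0.0), potCol(4, 0.0);

      // The pair's first atom sits in row slot 1, its second in column slot 2.
      potRow[1] += 0.5 * pairEnergy;
      potCol[2] += 0.5 * pairEnergy;

      // Once both blocks are folded back, the pair is counted exactly once.
      double total = 0.0;
      for (std::size_t i = 0; i < potRow.size(); ++i) total += potRow[i];
      for (std::size_t i = 0; i < potCol.size(); ++i) total += potCol[i];
      assert(total == pairEnergy);
      return 0;
    }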
| 1265 |  | } | 
| 1266 |  | } | 
| 1267 |  | #else | 
| 1188 | – |  | 
| 1268 |  | for (vector<int>::iterator j1 = cellList_[m1].begin(); | 
| 1269 |  | j1 != cellList_[m1].end(); ++j1) { | 
| 1270 |  | for (vector<int>::iterator j2 = cellList_[m2].begin(); | 
| 1271 |  | j2 != cellList_[m2].end(); ++j2) { | 
| 1272 | < |  | 
| 1272 | > |  | 
| 1273 |  | // Always do this if we're in different cells or if | 
| 1274 | < | // we're in the same cell and the global index of the | 
| 1275 | < | // j2 cutoff group is less than the j1 cutoff group | 
| 1276 | < |  | 
| 1277 | < | if (m2 != m1 || (*j2) < (*j1)) { | 
| 1274 | > | // we're in the same cell and the global index of | 
| 1275 | > | // the j2 cutoff group is greater than or equal to | 
| 1276 | > | // the j1 cutoff group.  Note that Rapaport's code | 
| 1277 | > | // has a "less than" conditional here, but that | 
| 1278 | > | // deals with atom-by-atom computation.  OpenMD | 
| 1279 | > | // allows atoms within a single cutoff group to | 
| 1280 | > | // interact with each other. | 
| 1281 | > |  | 
| 1282 | > |  | 
| 1283 | > |  | 
| 1284 | > | if (m2 != m1 || (*j2) >= (*j1) ) { | 
| 1285 | > |  | 
| 1286 |  | dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)]; | 
| 1287 |  | snap_->wrapVector(dr); | 
| 1288 |  | cuts = getGroupCutoffs( (*j1), (*j2) ); | 
| 1301 |  | // branch to do all cutoff group pairs | 
| 1302 |  | #ifdef IS_MPI | 
| 1303 |  | for (int j1 = 0; j1 < nGroupsInRow_; j1++) { | 
| 1304 | < | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1304 | > | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1305 |  | dr = cgColData.position[j2] - cgRowData.position[j1]; | 
| 1306 |  | snap_->wrapVector(dr); | 
| 1307 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1309 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1310 |  | } | 
| 1311 |  | } | 
| 1312 | < | } | 
| 1312 | > | } | 
| 1313 |  | #else | 
| 1314 | < | for (int j1 = 0; j1 < nGroups_ - 1; j1++) { | 
| 1315 | < | for (int j2 = j1 + 1; j2 < nGroups_; j2++) { | 
| 1314 | > | // include all groups here. | 
| 1315 | > | for (int j1 = 0; j1 < nGroups_; j1++) { | 
| 1316 | > | // include self group interactions j2 == j1 | 
| 1317 | > | for (int j2 = j1; j2 < nGroups_; j2++) { | 
| 1318 |  | dr = snap_->cgData.position[j2] - snap_->cgData.position[j1]; | 
| 1319 |  | snap_->wrapVector(dr); | 
| 1320 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1321 |  | if (dr.lengthSquare() < cuts.third) { | 
| 1322 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1323 |  | } | 
| 1324 | < | } | 
| 1325 | < | } | 
| 1324 | > | } | 
| 1325 | > | } | 
| 1326 |  | #endif | 
| 1327 |  | } | 
| 1328 |  |  |
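The relaxed pairing conditions in this hunk, `(*j2) >= (*j1)` in the cell-list branch and `j2 = j1` in the all-pairs branch, build a group neighbor list that contains every unordered pair of cutoff groups once, plus each group paired with itself; the self pairs are what allow atoms inside a single cutoff group to interact, and skipAtomPair/excludeAtomPair later discard the same-atom and excluded combinations. A standalone enumeration showing the resulting count (illustrative names only, not OpenMD's neighbor list):

    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      const int nGroups = 6;
      std::vector< std::pair<int, int> > pairs;

      // Same rule as the all-pairs branch: j2 starts at j1, so each
      // unordered pair appears once and every group is paired with itself.
      for (int j1 = 0; j1 < nGroups; ++j1)
        for (int j2 = j1; j2 < nGroups; ++j2)
          pairs.push_back(std::make_pair(j1, j2));

      // n self pairs plus n*(n-1)/2 distinct pairs = n*(n+1)/2 entries.
      assert(static_cast<int>(pairs.size()) == nGroups * (nGroups + 1) / 2);
      return 0;
    }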