--- branches/development/src/parallel/ForceMatrixDecomposition.cpp	2011/07/15 21:35:14	1593
+++ branches/development/src/parallel/ForceMatrixDecomposition.cpp	2012/06/14 01:58:35	1755
@@ -36,7 +36,8 @@
  * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
  * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
  * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
- * [4] Vardeman & Gezelter, in progress (2009).
+ * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
+ * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
  */
 #include "parallel/ForceMatrixDecomposition.hpp"
 #include "math/SquareMatrix3.hpp"
@@ -53,19 +54,34 @@ namespace OpenMD {
     // surrounding cells (not just the 14 upper triangular blocks that
     // are used when the processor can see all pairs)
 #ifdef IS_MPI
-    cellOffsets_.push_back( Vector3i(-1, 0, 0) );
-    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
-    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
-    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
-    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
-    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
+    cellOffsets_.clear();
     cellOffsets_.push_back( Vector3i(-1,-1,-1) );
     cellOffsets_.push_back( Vector3i( 0,-1,-1) );
-    cellOffsets_.push_back( Vector3i( 1,-1,-1) );
+    cellOffsets_.push_back( Vector3i( 1,-1,-1) );
+    cellOffsets_.push_back( Vector3i(-1, 0,-1) );
+    cellOffsets_.push_back( Vector3i( 0, 0,-1) );
     cellOffsets_.push_back( Vector3i( 1, 0,-1) );
-    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
-    cellOffsets_.push_back( Vector3i( 0, 1,-1) );
     cellOffsets_.push_back( Vector3i(-1, 1,-1) );
+    cellOffsets_.push_back( Vector3i( 0, 1,-1) );
+    cellOffsets_.push_back( Vector3i( 1, 1,-1) );
+    cellOffsets_.push_back( Vector3i(-1,-1, 0) );
+    cellOffsets_.push_back( Vector3i( 0,-1, 0) );
+    cellOffsets_.push_back( Vector3i( 1,-1, 0) );
+    cellOffsets_.push_back( Vector3i(-1, 0, 0) );
+    cellOffsets_.push_back( Vector3i( 0, 0, 0) );
+    cellOffsets_.push_back( Vector3i( 1, 0, 0) );
+    cellOffsets_.push_back( Vector3i(-1, 1, 0) );
+    cellOffsets_.push_back( Vector3i( 0, 1, 0) );
+    cellOffsets_.push_back( Vector3i( 1, 1, 0) );
+    cellOffsets_.push_back( Vector3i(-1,-1, 1) );
+    cellOffsets_.push_back( Vector3i( 0,-1, 1) );
+    cellOffsets_.push_back( Vector3i( 1,-1, 1) );
+    cellOffsets_.push_back( Vector3i(-1, 0, 1) );
+    cellOffsets_.push_back( Vector3i( 0, 0, 1) );
+    cellOffsets_.push_back( Vector3i( 1, 0, 1) );
+    cellOffsets_.push_back( Vector3i(-1, 1, 1) );
+    cellOffsets_.push_back( Vector3i( 0, 1, 1) );
+    cellOffsets_.push_back( Vector3i( 1, 1, 1) );
 #endif
   }
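Aside: the replacement table above enumerates all 27 offsets (the cell itself plus its 26 neighbors), since a processor that only sees row/column slices of the system cannot use the usual 14-block upper-triangular trick. A loop-generated equivalent is sketched below; this is an illustration only, not part of the patch, and it assumes OpenMD's `Vector3i` typedef from `math/Vector3.hpp`. The loop ordering (x fastest, then y, then z) matches the explicit table.

    #include <vector>
    #include "math/Vector3.hpp"   // OpenMD's Vector3i typedef

    // Sketch only -- the patch keeps the explicit list.  Generates the
    // same 27 cell offsets in the same order:
    std::vector<OpenMD::Vector3i> makeCellOffsets() {
      std::vector<OpenMD::Vector3i> offsets;
      for (int k = -1; k <= 1; k++)        // z slowest
        for (int j = -1; j <= 1; j++)      // then y
          for (int i = -1; i <= 1; i++)    // x fastest
            offsets.push_back( OpenMD::Vector3i(i, j, k) );
      return offsets;                      // offsets.size() == 27
    }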
@@ -79,7 +95,7 @@ namespace OpenMD {
     storageLayout_ = sman_->getStorageLayout();
     ff_ = info_->getForceField();
     nLocal_ = snap_->getNumberOfAtoms();
-
+
     nGroups_ = info_->getNLocalCutoffGroups();
     // gather the information for atomtype IDs (atids):
     idents = info_->getIdentArray();
@@ -93,7 +109,13 @@ namespace OpenMD {
     PairList* oneTwo = info_->getOneTwoInteractions();
     PairList* oneThree = info_->getOneThreeInteractions();
     PairList* oneFour = info_->getOneFourInteractions();
-
+
+    if (needVelocities_)
+      snap_->cgData.setStorageLayout(DataStorage::dslPosition |
+                                     DataStorage::dslVelocity);
+    else
+      snap_->cgData.setStorageLayout(DataStorage::dslPosition);
+
 #ifdef IS_MPI
 
     MPI::Intracomm row = rowComm.getComm();
@@ -129,8 +151,13 @@ namespace OpenMD {
     cgRowData.resize(nGroupsInRow_);
     cgRowData.setStorageLayout(DataStorage::dslPosition);
     cgColData.resize(nGroupsInCol_);
-    cgColData.setStorageLayout(DataStorage::dslPosition);
-
+    if (needVelocities_)
+      // column velocities are only gathered when velocity data is needed:
+      cgColData.setStorageLayout(DataStorage::dslPosition |
+                                 DataStorage::dslVelocity);
+    else
+      cgColData.setStorageLayout(DataStorage::dslPosition);
+
     identsRow.resize(nAtomsInRow_);
     identsCol.resize(nAtomsInCol_);
@@ -154,38 +181,11 @@ namespace OpenMD {
     AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
     AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
 
-    cerr << "Atoms in Local:\n";
-    for (int i = 0; i < AtomLocalToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t localAt =\t" << AtomLocalToGlobal[i] << "\n";
-    }
-    cerr << "Atoms in Row:\n";
-    for (int i = 0; i < AtomRowToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t rowAt =\t" << AtomRowToGlobal[i] << "\n";
-    }
-    cerr << "Atoms in Col:\n";
-    for (int i = 0; i < AtomColToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t colAt =\t" << AtomColToGlobal[i] << "\n";
-    }
-
     cgRowToGlobal.resize(nGroupsInRow_);
     cgColToGlobal.resize(nGroupsInCol_);
     cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
     cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
 
-    cerr << "Gruops in Local:\n";
-    for (int i = 0; i < cgLocalToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t localCG =\t" << cgLocalToGlobal[i] << "\n";
-    }
-    cerr << "Groups in Row:\n";
-    for (int i = 0; i < cgRowToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t rowCG =\t" << cgRowToGlobal[i] << "\n";
-    }
-    cerr << "Groups in Col:\n";
-    for (int i = 0; i < cgColToGlobal.size(); i++) {
-      cerr << "i =\t" << i << "\t colCG =\t" << cgColToGlobal[i] << "\n";
-    }
-
-
     massFactorsRow.resize(nAtomsInRow_);
     massFactorsCol.resize(nAtomsInCol_);
     AtomPlanRealRow->gather(massFactors, massFactorsRow);
@@ -245,26 +245,7 @@ namespace OpenMD {
       }
     }
 
-#endif
-
-    // allocate memory for the parallel objects
-    atypesLocal.resize(nLocal_);
-
-    for (int i = 0; i < nLocal_; i++)
-      atypesLocal[i] = ff_->getAtomType(idents[i]);
-
-    groupList_.clear();
-    groupList_.resize(nGroups_);
-    for (int i = 0; i < nGroups_; i++) {
-      int gid = cgLocalToGlobal[i];
-      for (int j = 0; j < nLocal_; j++) {
-        int aid = AtomLocalToGlobal[j];
-        if (globalGroupMembership[aid] == gid) {
-          groupList_[i].push_back(j);
-        }
-      }
-    }
-
+#else
     excludesForAtom.clear();
     excludesForAtom.resize(nLocal_);
     toposForAtom.clear();
@@ -297,7 +278,27 @@ namespace OpenMD {
         }
       }
     }
-
+#endif
+
+    // allocate memory for the parallel objects
+    atypesLocal.resize(nLocal_);
+
+    for (int i = 0; i < nLocal_; i++)
+      atypesLocal[i] = ff_->getAtomType(idents[i]);
+
+    groupList_.clear();
+    groupList_.resize(nGroups_);
+    for (int i = 0; i < nGroups_; i++) {
+      int gid = cgLocalToGlobal[i];
+      for (int j = 0; j < nLocal_; j++) {
+        int aid = AtomLocalToGlobal[j];
+        if (globalGroupMembership[aid] == gid) {
+          groupList_[i].push_back(j);
+        }
+      }
+    }
+
     createGtypeCutoffMap();
 
   }
@@ -533,8 +534,22 @@
            atomRowData.skippedCharge.end(), 0.0);
       fill(atomColData.skippedCharge.begin(),
           atomColData.skippedCharge.end(), 0.0);
+    }
+
+    if (storageLayout_ & DataStorage::dslFlucQForce) {
+      fill(atomRowData.flucQFrc.begin(),
+           atomRowData.flucQFrc.end(), 0.0);
+      fill(atomColData.flucQFrc.begin(),
+           atomColData.flucQFrc.end(), 0.0);
+    }
+
+    if (storageLayout_ & DataStorage::dslElectricField) {
+      fill(atomRowData.electricField.begin(),
+           atomRowData.electricField.end(), V3Zero);
+      fill(atomColData.electricField.begin(),
+           atomColData.electricField.end(), V3Zero);
     }
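Aside: every branch in this section (and throughout the file) gates on a bit in `storageLayout_`. A standalone sketch of that test pattern is below; the flag values are illustrative stand-ins for the real `DataStorage::dsl*` enumerators, which live in OpenMD's DataStorage header.

    #include <cassert>

    // Sketch of the bit-mask gating used by the dsl* checks (values
    // are hypothetical; only the pattern matters):
    int main() {
      const int dslPosition   = 1 << 0;
      const int dslVelocity   = 1 << 1;
      const int dslFlucQForce = 1 << 2;
      int storageLayout = dslPosition | dslVelocity;

      assert( (storageLayout & dslVelocity)   != 0 );  // array is active
      assert( (storageLayout & dslFlucQForce) == 0 );  // array is skipped
      return 0;
    }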
 
 #endif
 
     // even in parallel, we need to zero out the local arrays:
 
@@ -547,19 +569,26 @@ namespace OpenMD {
       fill(snap_->atomData.density.begin(),
            snap_->atomData.density.end(), 0.0);
     }
+
     if (storageLayout_ & DataStorage::dslFunctional) {
       fill(snap_->atomData.functional.begin(),
            snap_->atomData.functional.end(), 0.0);
     }
+
     if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
       fill(snap_->atomData.functionalDerivative.begin(),
            snap_->atomData.functionalDerivative.end(), 0.0);
     }
+
     if (storageLayout_ & DataStorage::dslSkippedCharge) {
       fill(snap_->atomData.skippedCharge.begin(),
            snap_->atomData.skippedCharge.end(), 0.0);
     }
-
+
+    if (storageLayout_ & DataStorage::dslElectricField) {
+      fill(snap_->atomData.electricField.begin(),
+           snap_->atomData.electricField.end(), V3Zero);
+    }
   }
 
@@ -576,23 +605,21 @@ namespace OpenMD {
 
     // gather up the cutoff group positions
 
-    cerr << "before gather\n";
-    for (int i = 0; i < snap_->cgData.position.size(); i++) {
-      cerr << "cgpos = " << snap_->cgData.position[i] << "\n";
-    }
-
     cgPlanVectorRow->gather(snap_->cgData.position,
                             cgRowData.position);
-    cerr << "after gather\n";
-    for (int i = 0; i < cgRowData.position.size(); i++) {
-      cerr << "cgRpos = " << cgRowData.position[i] << "\n";
-    }
-
     cgPlanVectorColumn->gather(snap_->cgData.position,
                                cgColData.position);
-    for (int i = 0; i < cgColData.position.size(); i++) {
-      cerr << "cgCpos = " << cgColData.position[i] << "\n";
+
+
+
+    if (needVelocities_) {
+      // gather up the atomic velocities
+      AtomPlanVectorColumn->gather(snap_->atomData.velocity,
+                                   atomColData.velocity);
+
+      cgPlanVectorColumn->gather(snap_->cgData.velocity,
+                                 cgColData.velocity);
     }
 
@@ -612,6 +639,14 @@ namespace OpenMD {
                               atomColData.electroFrame);
     }
 
+    // if needed, gather the atomic fluctuating charge values
+    if (storageLayout_ & DataStorage::dslFlucQPosition) {
+      AtomPlanRealRow->gather(snap_->atomData.flucQPos,
+                              atomRowData.flucQPos);
+      AtomPlanRealColumn->gather(snap_->atomData.flucQPos,
+                                 atomColData.flucQPos);
+    }
+
 #endif
   }
 
@@ -634,6 +669,18 @@ namespace OpenMD {
       for (int i = 0; i < n; i++)
         snap_->atomData.density[i] += rho_tmp[i];
     }
+
+    if (storageLayout_ & DataStorage::dslElectricField) {
+
+      AtomPlanVectorRow->scatter(atomRowData.electricField,
+                                 snap_->atomData.electricField);
+
+      int n = snap_->atomData.electricField.size();
+      vector<Vector3d> field_tmp(n, V3Zero);
+      AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp);
+      for (int i = 0; i < n; i++)
+        snap_->atomData.electricField[i] += field_tmp[i];
+    }
 #endif
   }
 
@@ -708,10 +755,28 @@ namespace OpenMD {
       }
 
       AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
-      for (int i = 0; i < ns; i++)
+      for (int i = 0; i < ns; i++)
         snap_->atomData.skippedCharge[i] += skch_tmp[i];
+    }
 
+    if (storageLayout_ & DataStorage::dslFlucQForce) {
+
+      int nq = snap_->atomData.flucQFrc.size();
+      vector<RealType> fqfrc_tmp(nq, 0.0);
+
+      AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp);
+      for (int i = 0; i < nq; i++) {
+        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
+        fqfrc_tmp[i] = 0.0;
+      }
+
+      AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp);
+      for (int i = 0; i < nq; i++)
+        snap_->atomData.flucQFrc[i] += fqfrc_tmp[i];
+
+    }
+
     nLocal_ = snap_->getNumberOfAtoms();
 
     vector<potVec> pot_temp(nLocal_,
@@ -723,7 +788,21 @@
 
     for (int ii = 0; ii < pot_temp.size(); ii++ )
       pairwisePot += pot_temp[ii];
-
+
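Aside: the particle-potential block that follows doubles each atom's accumulated share, per its "factor of two" comment. A minimal numeric sketch of why (hypothetical values, plain doubles standing in for `RealType`; not the OpenMD API): each pair energy V is split half/half between the row and column copies in unpackInteractionData, so summing both scatters counts V exactly once in pairwisePot, while an atom's own share must be doubled to recover its full per-particle contribution.

    #include <cassert>

    // One pair with energy V, split across the row/column decomposition:
    int main() {
      const double V = 1.0;
      double pot_row = 0.5 * V;                // row-side share (atom i)
      double pot_col = 0.5 * V;                // column-side share (atom j)

      double pairwisePot  = pot_row + pot_col; // V, counted once
      double particlePot_i = 2.0 * pot_row;    // restores the full V
      assert(pairwisePot == V && particlePot_i == V);
      return 0;
    }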
+    if (storageLayout_ & DataStorage::dslParticlePot) {
+      // This is the pairwise contribution to the particle pot.  The
+      // embedding contribution is added in each of the low level
+      // non-bonded routines.  In single processor, this is done in
+      // unpackInteractionData, not in collectData.
+      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
+        for (int i = 0; i < nLocal_; i++) {
+          // factor of two is because the total potential terms are divided
+          // by 2 in parallel due to row/column scatter
+          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
+        }
+      }
+    }
+
     fill(pot_temp.begin(), pot_temp.end(),
          Vector<RealType, N_INTERACTION_FAMILIES> (0.0));
@@ -731,9 +810,65 @@
 
     for (int ii = 0; ii < pot_temp.size(); ii++ )
       pairwisePot += pot_temp[ii];
+
+    if (storageLayout_ & DataStorage::dslParticlePot) {
+      // This is the pairwise contribution to the particle pot.  The
+      // embedding contribution is added in each of the low level
+      // non-bonded routines.  In single processor, this is done in
+      // unpackInteractionData, not in collectData.
+      for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
+        for (int i = 0; i < nLocal_; i++) {
+          // factor of two is because the total potential terms are divided
+          // by 2 in parallel due to row/column scatter
+          snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii);
+        }
+      }
+    }
+
+    if (storageLayout_ & DataStorage::dslParticlePot) {
+      int npp = snap_->atomData.particlePot.size();
+      vector<RealType> ppot_temp(npp, 0.0);
+
+      // This is the direct or embedding contribution to the particle
+      // pot.
+
+      AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp);
+      for (int i = 0; i < npp; i++) {
+        snap_->atomData.particlePot[i] += ppot_temp[i];
+      }
+
+      fill(ppot_temp.begin(), ppot_temp.end(), 0.0);
+
+      AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp);
+      for (int i = 0; i < npp; i++) {
+        snap_->atomData.particlePot[i] += ppot_temp[i];
+      }
+    }
+
+    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
+      RealType ploc1 = pairwisePot[ii];
+      RealType ploc2 = 0.0;
+      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
+      pairwisePot[ii] = ploc2;
+    }
+
+    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
+      RealType ploc1 = embeddingPot[ii];
+      RealType ploc2 = 0.0;
+      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
+      embeddingPot[ii] = ploc2;
+    }
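Aside: the reductions above (and the in-place heat-flux reduction that follows) use the MPI C++ bindings (`MPI::COMM_WORLD.Allreduce`), which were deprecated in MPI-2.2 and removed in MPI-3. A sketch of the equivalent reduction through the plain C interface, for reference; the 6 stands in for `N_INTERACTION_FAMILIES` and `double` for `RealType` (both assumptions for illustration):

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      double pairwisePot[6] = {0.0};   // one entry per interaction family
      // ... accumulate this processor's local contributions ...
      MPI_Allreduce(MPI_IN_PLACE, pairwisePot, 6,
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
      MPI_Finalize();
      return 0;
    }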
+
+    // Here be dragons.
+    MPI::Intracomm col = colComm.getComm();
+
+    col.Allreduce(MPI::IN_PLACE,
+                  &snap_->frameData.conductiveHeatFlux[0], 3,
+                  MPI::REALTYPE, MPI::SUM);
+
+
 #endif
 
-    cerr << "pairwisePot = " << pairwisePot << "\n";
   }
 
   int ForceMatrixDecomposition::getNAtomsInRow() {
@@ -768,19 +903,31 @@ namespace OpenMD {
 
 #ifdef IS_MPI
    d = cgColData.position[cg2] - cgRowData.position[cg1];
-    cerr << "cg1 = " << cg1 << "\tcg1p = " << cgRowData.position[cg1] << "\n";
-    cerr << "cg2 = " << cg2 << "\tcg2p = " << cgColData.position[cg2] << "\n";
 #else
     d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
-    cerr << "cg1 = " << cg1 << "\tcg1p = " << snap_->cgData.position[cg1] << "\n";
-    cerr << "cg2 = " << cg2 << "\tcg2p = " << snap_->cgData.position[cg2] << "\n";
 #endif
 
     snap_->wrapVector(d);
     return d;
   }
 
+  Vector3d ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){
+#ifdef IS_MPI
+    return cgColData.velocity[cg2];
+#else
+    return snap_->cgData.velocity[cg2];
+#endif
+  }
+
+  Vector3d ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){
+#ifdef IS_MPI
+    return atomColData.velocity[atom2];
+#else
+    return snap_->atomData.velocity[atom2];
+#endif
+  }
+
+
   Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
 
     Vector3d d;
@@ -848,25 +995,27 @@
    */
   bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
     int unique_id_1, unique_id_2;
-
-
-    cerr << "sap with atom1, atom2 =\t" << atom1 << "\t" << atom2 << "\n";
+
 #ifdef IS_MPI
     // in MPI, we have to look up the unique IDs for each atom
     unique_id_1 = AtomRowToGlobal[atom1];
     unique_id_2 = AtomColToGlobal[atom2];
+#else
+    unique_id_1 = AtomLocalToGlobal[atom1];
+    unique_id_2 = AtomLocalToGlobal[atom2];
+#endif
 
-    cerr << "sap with uid1, uid2 =\t" << unique_id_1 << "\t" << unique_id_2 << "\n";
-
-    // this situation should only arise in MPI simulations
     if (unique_id_1 == unique_id_2) return true;
-
+
+#ifdef IS_MPI
     // this prevents us from doing the pair on multiple processors
     if (unique_id_1 < unique_id_2) {
       if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
     } else {
-      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
+      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
     }
 #endif
+
     return false;
   }
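Aside: the parity test above is what divides the labor once the parallel neighbor list visits *all* row/column pairs. A standalone sketch verifying the property it relies on (this is an illustration using bare ints for the global ids, not OpenMD code): for any two distinct ids, exactly one ordering of the pair survives, so a pair visible to two different processors is computed only once.

    #include <cassert>

    static bool skipPair(int id1, int id2) {
      if (id1 == id2) return true;                  // self pair
      if (id1 < id2) return (id1 + id2) % 2 == 0;
      else           return (id1 + id2) % 2 == 1;
    }

    int main() {
      for (int i = 0; i < 20; i++)
        for (int j = 0; j < 20; j++)
          if (i != j)
            assert(skipPair(i, j) != skipPair(j, i)); // one side keeps it
      return 0;
    }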
-    unique_id_2 = AtomColToGlobal[atom2];
-#else
-    // in the normal loop, the atom numbers are unique
-    unique_id_2 = atom2;
-#endif
+
+    // excludesForAtom was constructed to use row/column indices in the MPI
+    // version, and to use local IDs in the non-MPI version:
 
     for (vector<int>::iterator i = excludesForAtom[atom1].begin();
          i != excludesForAtom[atom1].end(); ++i) {
-      if ( (*i) == unique_id_2 ) return true;
+      if ( (*i) == atom2 ) return true;
     }
 
     return false;
   }
 
@@ -965,11 +1109,14 @@
       idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]);
     }
 
-#else
+    if (storageLayout_ & DataStorage::dslFlucQPosition) {
+      idat.flucQ1 = &(atomRowData.flucQPos[atom1]);
+      idat.flucQ2 = &(atomColData.flucQPos[atom2]);
+    }
+
+#else
+
     idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]);
-    //idat.atypes = make_pair( ff_->getAtomType(idents[atom1]),
-    //                         ff_->getAtomType(idents[atom2]) );
 
     if (storageLayout_ & DataStorage::dslAmat) {
       idat.A1 = &(snap_->atomData.aMat[atom1]);
@@ -1010,22 +1157,59 @@
       idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]);
       idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]);
     }
+
+    if (storageLayout_ & DataStorage::dslFlucQPosition) {
+      idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]);
+      idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]);
+    }
+
 #endif
   }
 
   void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) {
 #ifdef IS_MPI
-    pot_row[atom1] += 0.5 * *(idat.pot);
-    pot_col[atom2] += 0.5 * *(idat.pot);
+    pot_row[atom1] += RealType(0.5) * *(idat.pot);
+    pot_col[atom2] += RealType(0.5) * *(idat.pot);
 
     atomRowData.force[atom1] += *(idat.f1);
     atomColData.force[atom2] -= *(idat.f1);
+
+    if (storageLayout_ & DataStorage::dslFlucQForce) {
+      atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1);
+      atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2);
+    }
+
+    if (storageLayout_ & DataStorage::dslElectricField) {
+      atomRowData.electricField[atom1] += *(idat.eField1);
+      atomColData.electricField[atom2] += *(idat.eField2);
+    }
+
 #else
     pairwisePot += *(idat.pot);
 
     snap_->atomData.force[atom1] += *(idat.f1);
    snap_->atomData.force[atom2] -= *(idat.f1);
+
+    if (idat.doParticlePot) {
+      // This is the pairwise contribution to the particle pot.  The
+      // embedding contribution is added in each of the low level
+      // non-bonded routines.  In parallel, this calculation is done
+      // in collectData, not in unpackInteractionData.
+      snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw);
+      snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw);
+    }
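Aside: in this serial branch each atom records the full switched pair energy `vpair * sw`, with no row/column halving, so the per-particle sums come out twice the total pair potential -- the same convention the parallel path reproduces with its factor of 2.0. A tiny numeric sketch (hypothetical values, not the OpenMD API):

    #include <cassert>

    int main() {
      const double vpair = -2.0;  // raw pair energy
      const double sw    =  0.5;  // cutoff-group switching value in [0,1]
      double particlePot1 = 0.0, particlePot2 = 0.0;

      particlePot1 += vpair * sw;
      particlePot2 += vpair * sw;
      assert(particlePot1 + particlePot2 == 2.0 * (vpair * sw));
      return 0;
    }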
+
+    if (storageLayout_ & DataStorage::dslFlucQForce) {
+      snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1);
+      snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2);
+    }
+
+    if (storageLayout_ & DataStorage::dslElectricField) {
+      snap_->atomData.electricField[atom1] += *(idat.eField1);
+      snap_->atomData.electricField[atom2] += *(idat.eField2);
+    }
+
 #endif
 
   }
 
@@ -1131,6 +1315,7 @@
         // add this cutoff group to the list of groups in this cell;
         cellListCol_[cellIndex].push_back(i);
       }
+
 #else
     for (int i = 0; i < nGroups_; i++) {
       rs = snap_->cgData.position[i];
@@ -1156,6 +1341,7 @@
       // add this cutoff group to the list of groups in this cell;
       cellList_[cellIndex].push_back(i);
     }
+
 #endif
 
     for (int m1z = 0; m1z < nCells_.z(); m1z++) {
@@ -1168,7 +1354,8 @@
             os != cellOffsets_.end(); ++os) {
 
           Vector3i m2v = m1v + (*os);
-
+
+
           if (m2v.x() >= nCells_.x()) {
            m2v.x() = 0;
           } else if (m2v.x() < 0) {
            m2v.x() = nCells_.x() - 1;
           }
 
          if (m2v.y() >= nCells_.y()) {
            m2v.y() = 0;
          } else if (m2v.y() < 0) {
            m2v.y() = nCells_.y() - 1;
          }
 
          if (m2v.z() >= nCells_.z()) {
            m2v.z() = 0;
          } else if (m2v.z() < 0) {
            m2v.z() = nCells_.z() - 1;
          }
-
+
          int m2 = Vlinear (m2v, nCells_);
 
 #ifdef IS_MPI
@@ -1195,8 +1382,9 @@
              for (vector<int>::iterator j2 = cellListCol_[m2].begin();
                   j2 != cellListCol_[m2].end(); ++j2) {
 
-                // In parallel, we need to visit *all* pairs of row &
-                // column indicies and will truncate later on.
+                // In parallel, we need to visit *all* pairs of row
+                // & column indices and will divide labor in the
+                // force evaluation later.
                dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)];
                snap_->wrapVector(dr);
                cuts = getGroupCutoffs( (*j1), (*j2) );
                if (dr.lengthSquare() < cuts.third) {
                  neighborList.push_back(make_pair(*j1, *j2));
                }
              }
            }
 #else
-
            for (vector<int>::iterator j1 = cellList_[m1].begin();
                 j1 != cellList_[m1].end(); ++j1) {
              for (vector<int>::iterator j2 = cellList_[m2].begin();
                   j2 != cellList_[m2].end(); ++j2) {
-
-                // Always do this if we're in different cells or if
-                // we're in the same cell and the global index of the
-                // j2 cutoff group is less than the j1 cutoff group
-
-                if (m2 != m1 || (*j2) < (*j1)) {
+
+                // Always do this if we're in different cells or if
+                // we're in the same cell and the global index of
+                // the j2 cutoff group is greater than or equal to
+                // the j1 cutoff group.  Note that Rapaport's code
+                // has a "less than" conditional here, but that
+                // deals with atom-by-atom computation.  OpenMD
+                // allows atoms within a single cutoff group to
+                // interact with each other.
+
+
+
+                if (m2 != m1 || (*j2) >= (*j1) ) {
+
                  dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
                  snap_->wrapVector(dr);
                  cuts = getGroupCutoffs( (*j1), (*j2) );
@@ -1235,7 +1430,7 @@
       // branch to do all cutoff group pairs
 #ifdef IS_MPI
       for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
-        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
+        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
           dr = cgColData.position[j2] - cgRowData.position[j1];
           snap_->wrapVector(dr);
           cuts = getGroupCutoffs( j1, j2 );
           if (dr.lengthSquare() < cuts.third) {
             neighborList.push_back(make_pair(j1, j2));
           }
         }
-      }
+      }
 #else
-      for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
-        for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
+      // include all groups here.
+      for (int j1 = 0; j1 < nGroups_; j1++) {
+        // include self-group interactions (j2 == j1)
+        for (int j2 = j1; j2 < nGroups_; j2++) {
           dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
           snap_->wrapVector(dr);
           cuts = getGroupCutoffs( j1, j2 );
           if (dr.lengthSquare() < cuts.third) {
             neighborList.push_back(make_pair(j1, j2));
           }
-        }
-      }
+        }
+      }
 #endif
   }
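Aside: starting `j2` at `j1` (rather than `j1 + 1`, as before the patch) adds each group's self-pair to the neighbor list; the atom-level skipAtomPair test earlier in the file then rejects i == i while keeping distinct atoms that share a cutoff group. A standalone sketch with hypothetical counts (illustration only, not OpenMD code):

    #include <cassert>
    #include <utility>
    #include <vector>

    int main() {
      const int nGroups = 3;
      std::vector< std::pair<int, int> > neighborList;
      for (int j1 = 0; j1 < nGroups; j1++)
        for (int j2 = j1; j2 < nGroups; j2++)  // j2 == j1 included
          neighborList.push_back(std::make_pair(j1, j2));
      assert(neighborList.size() == 6);        // 3 self + 3 distinct pairs
      return 0;
    }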