--- branches/development/src/parallel/ForceMatrixDecomposition.cpp 2011/06/16 22:00:08 1583 +++ branches/development/src/parallel/ForceMatrixDecomposition.cpp 2012/06/22 20:01:37 1761 @@ -36,7 +36,8 @@ * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008). - * [4] Vardeman & Gezelter, in progress (2009). + * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010). + * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). */ #include "parallel/ForceMatrixDecomposition.hpp" #include "math/SquareMatrix3.hpp" @@ -47,54 +48,101 @@ namespace OpenMD { using namespace std; namespace OpenMD { + ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) { + + // In a parallel computation, row and colum scans must visit all + // surrounding cells (not just the 14 upper triangular blocks that + // are used when the processor can see all pairs) +#ifdef IS_MPI + cellOffsets_.clear(); + cellOffsets_.push_back( Vector3i(-1,-1,-1) ); + cellOffsets_.push_back( Vector3i( 0,-1,-1) ); + cellOffsets_.push_back( Vector3i( 1,-1,-1) ); + cellOffsets_.push_back( Vector3i(-1, 0,-1) ); + cellOffsets_.push_back( Vector3i( 0, 0,-1) ); + cellOffsets_.push_back( Vector3i( 1, 0,-1) ); + cellOffsets_.push_back( Vector3i(-1, 1,-1) ); + cellOffsets_.push_back( Vector3i( 0, 1,-1) ); + cellOffsets_.push_back( Vector3i( 1, 1,-1) ); + cellOffsets_.push_back( Vector3i(-1,-1, 0) ); + cellOffsets_.push_back( Vector3i( 0,-1, 0) ); + cellOffsets_.push_back( Vector3i( 1,-1, 0) ); + cellOffsets_.push_back( Vector3i(-1, 0, 0) ); + cellOffsets_.push_back( Vector3i( 0, 0, 0) ); + cellOffsets_.push_back( Vector3i( 1, 0, 0) ); + cellOffsets_.push_back( Vector3i(-1, 1, 0) ); + cellOffsets_.push_back( Vector3i( 0, 1, 0) ); + cellOffsets_.push_back( Vector3i( 1, 1, 0) ); + cellOffsets_.push_back( Vector3i(-1,-1, 1) ); + cellOffsets_.push_back( Vector3i( 0,-1, 1) ); + cellOffsets_.push_back( Vector3i( 1,-1, 1) ); + cellOffsets_.push_back( Vector3i(-1, 0, 1) ); + cellOffsets_.push_back( Vector3i( 0, 0, 1) ); + cellOffsets_.push_back( Vector3i( 1, 0, 1) ); + cellOffsets_.push_back( Vector3i(-1, 1, 1) ); + cellOffsets_.push_back( Vector3i( 0, 1, 1) ); + cellOffsets_.push_back( Vector3i( 1, 1, 1) ); +#endif + } + + /** * distributeInitialData is essentially a copy of the older fortran * SimulationSetup */ - void ForceMatrixDecomposition::distributeInitialData() { snap_ = sman_->getCurrentSnapshot(); storageLayout_ = sman_->getStorageLayout(); ff_ = info_->getForceField(); nLocal_ = snap_->getNumberOfAtoms(); - + nGroups_ = info_->getNLocalCutoffGroups(); - cerr << "in dId, nGroups = " << nGroups_ << "\n"; // gather the information for atomtype IDs (atids): idents = info_->getIdentArray(); AtomLocalToGlobal = info_->getGlobalAtomIndices(); cgLocalToGlobal = info_->getGlobalGroupIndices(); vector globalGroupMembership = info_->getGlobalGroupMembership(); + massFactors = info_->getMassFactors(); - PairList excludes = info_->getExcludedInteractions(); - PairList oneTwo = info_->getOneTwoInteractions(); - PairList oneThree = info_->getOneThreeInteractions(); - PairList oneFour = info_->getOneFourInteractions(); + PairList* excludes = info_->getExcludedInteractions(); + PairList* oneTwo = info_->getOneTwoInteractions(); + PairList* oneThree = info_->getOneThreeInteractions(); + PairList* oneFour = info_->getOneFourInteractions(); + + if 
(needVelocities_) + snap_->cgData.setStorageLayout(DataStorage::dslPosition | + DataStorage::dslVelocity); + else + snap_->cgData.setStorageLayout(DataStorage::dslPosition); + #ifdef IS_MPI - AtomCommIntRow = new Communicator(nLocal_); - AtomCommRealRow = new Communicator(nLocal_); - AtomCommVectorRow = new Communicator(nLocal_); - AtomCommMatrixRow = new Communicator(nLocal_); - AtomCommPotRow = new Communicator(nLocal_); + MPI::Intracomm row = rowComm.getComm(); + MPI::Intracomm col = colComm.getComm(); - AtomCommIntColumn = new Communicator(nLocal_); - AtomCommRealColumn = new Communicator(nLocal_); - AtomCommVectorColumn = new Communicator(nLocal_); - AtomCommMatrixColumn = new Communicator(nLocal_); - AtomCommPotColumn = new Communicator(nLocal_); + AtomPlanIntRow = new Plan(row, nLocal_); + AtomPlanRealRow = new Plan(row, nLocal_); + AtomPlanVectorRow = new Plan(row, nLocal_); + AtomPlanMatrixRow = new Plan(row, nLocal_); + AtomPlanPotRow = new Plan(row, nLocal_); - cgCommIntRow = new Communicator(nGroups_); - cgCommVectorRow = new Communicator(nGroups_); - cgCommIntColumn = new Communicator(nGroups_); - cgCommVectorColumn = new Communicator(nGroups_); + AtomPlanIntColumn = new Plan(col, nLocal_); + AtomPlanRealColumn = new Plan(col, nLocal_); + AtomPlanVectorColumn = new Plan(col, nLocal_); + AtomPlanMatrixColumn = new Plan(col, nLocal_); + AtomPlanPotColumn = new Plan(col, nLocal_); - nAtomsInRow_ = AtomCommIntRow->getSize(); - nAtomsInCol_ = AtomCommIntColumn->getSize(); - nGroupsInRow_ = cgCommIntRow->getSize(); - nGroupsInCol_ = cgCommIntColumn->getSize(); + cgPlanIntRow = new Plan(row, nGroups_); + cgPlanVectorRow = new Plan(row, nGroups_); + cgPlanIntColumn = new Plan(col, nGroups_); + cgPlanVectorColumn = new Plan(col, nGroups_); + nAtomsInRow_ = AtomPlanIntRow->getSize(); + nAtomsInCol_ = AtomPlanIntColumn->getSize(); + nGroupsInRow_ = cgPlanIntRow->getSize(); + nGroupsInCol_ = cgPlanIntColumn->getSize(); + // Modify the data storage objects with the correct layouts and sizes: atomRowData.resize(nAtomsInRow_); atomRowData.setStorageLayout(storageLayout_); @@ -103,23 +151,49 @@ namespace OpenMD { cgRowData.resize(nGroupsInRow_); cgRowData.setStorageLayout(DataStorage::dslPosition); cgColData.resize(nGroupsInCol_); - cgColData.setStorageLayout(DataStorage::dslPosition); - + if (needVelocities_) + // we only need column velocities if we need them. 
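The Plan objects created here replace the older Communicator wrappers; conceptually, a row (or column) Plan concatenates every processor's local array across that communicator, which is why AtomPlanIntRow->getSize() returns the total atom count for the row. A minimal sketch of that gather, assuming raw MPI calls rather than the actual Plan implementation:

// Sketch (assumption, not the OpenMD Plan class): a row-wise gather.
// Every processor in the row communicator contributes its nLocal
// entries and receives the concatenated nAtomsInRow list in rank order.
#include <mpi.h>
#include <vector>

std::vector<int> gatherAlongRow(MPI_Comm row, const std::vector<int>& local) {
  int nProcs;
  MPI_Comm_size(row, &nProcs);
  int nLocal = static_cast<int>(local.size());

  // how many entries each processor in this row contributes:
  std::vector<int> counts(nProcs), displs(nProcs, 0);
  MPI_Allgather(&nLocal, 1, MPI_INT, counts.data(), 1, MPI_INT, row);
  for (int p = 1; p < nProcs; p++) displs[p] = displs[p - 1] + counts[p - 1];

  std::vector<int> rowData(displs[nProcs - 1] + counts[nProcs - 1]);
  MPI_Allgatherv(local.data(), nLocal, MPI_INT, rowData.data(),
                 counts.data(), displs.data(), MPI_INT, row);
  return rowData;
}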
+ cgColData.setStorageLayout(DataStorage::dslPosition | + DataStorage::dslVelocity); + else + cgColData.setStorageLayout(DataStorage::dslPosition); + identsRow.resize(nAtomsInRow_); identsCol.resize(nAtomsInCol_); - AtomCommIntRow->gather(idents, identsRow); - AtomCommIntColumn->gather(idents, identsCol); + AtomPlanIntRow->gather(idents, identsRow); + AtomPlanIntColumn->gather(idents, identsCol); - AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); - AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); - - cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal); - cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal); + // allocate memory for the parallel objects + atypesRow.resize(nAtomsInRow_); + atypesCol.resize(nAtomsInCol_); - AtomCommRealRow->gather(massFactors, massFactorsRow); - AtomCommRealColumn->gather(massFactors, massFactorsCol); + for (int i = 0; i < nAtomsInRow_; i++) + atypesRow[i] = ff_->getAtomType(identsRow[i]); + for (int i = 0; i < nAtomsInCol_; i++) + atypesCol[i] = ff_->getAtomType(identsCol[i]); + pot_row.resize(nAtomsInRow_); + pot_col.resize(nAtomsInCol_); + + expot_row.resize(nAtomsInRow_); + expot_col.resize(nAtomsInCol_); + + AtomRowToGlobal.resize(nAtomsInRow_); + AtomColToGlobal.resize(nAtomsInCol_); + AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); + AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); + + cgRowToGlobal.resize(nGroupsInRow_); + cgColToGlobal.resize(nGroupsInCol_); + cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal); + cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal); + + massFactorsRow.resize(nAtomsInRow_); + massFactorsCol.resize(nAtomsInCol_); + AtomPlanRealRow->gather(massFactors, massFactorsRow); + AtomPlanRealColumn->gather(massFactors, massFactorsCol); + groupListRow_.clear(); groupListRow_.resize(nGroupsInRow_); for (int i = 0; i < nGroupsInRow_; i++) { @@ -142,8 +216,8 @@ namespace OpenMD { } } - skipsForAtom.clear(); - skipsForAtom.resize(nAtomsInRow_); + excludesForAtom.clear(); + excludesForAtom.resize(nAtomsInRow_); toposForAtom.clear(); toposForAtom.resize(nAtomsInRow_); topoDist.clear(); @@ -154,18 +228,18 @@ namespace OpenMD { for (int j = 0; j < nAtomsInCol_; j++) { int jglob = AtomColToGlobal[j]; - if (excludes.hasPair(iglob, jglob)) - skipsForAtom[i].push_back(j); + if (excludes->hasPair(iglob, jglob)) + excludesForAtom[i].push_back(j); - if (oneTwo.hasPair(iglob, jglob)) { + if (oneTwo->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(1); } else { - if (oneThree.hasPair(iglob, jglob)) { + if (oneThree->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(2); } else { - if (oneFour.hasPair(iglob, jglob)) { + if (oneFour->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(3); } @@ -174,22 +248,9 @@ namespace OpenMD { } } -#endif - - groupList_.clear(); - groupList_.resize(nGroups_); - for (int i = 0; i < nGroups_; i++) { - int gid = cgLocalToGlobal[i]; - for (int j = 0; j < nLocal_; j++) { - int aid = AtomLocalToGlobal[j]; - if (globalGroupMembership[aid] == gid) { - groupList_[i].push_back(j); - } - } - } - - skipsForAtom.clear(); - skipsForAtom.resize(nLocal_); +#else + excludesForAtom.clear(); + excludesForAtom.resize(nLocal_); toposForAtom.clear(); toposForAtom.resize(nLocal_); topoDist.clear(); @@ -201,18 +262,18 @@ namespace OpenMD { for (int j = 0; j < nLocal_; j++) { int jglob = AtomLocalToGlobal[j]; - if (excludes.hasPair(iglob, jglob)) - skipsForAtom[i].push_back(j); + if 
(excludes->hasPair(iglob, jglob)) + excludesForAtom[i].push_back(j); - if (oneTwo.hasPair(iglob, jglob)) { + if (oneTwo->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(1); } else { - if (oneThree.hasPair(iglob, jglob)) { + if (oneThree->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(2); } else { - if (oneFour.hasPair(iglob, jglob)) { + if (oneFour->hasPair(iglob, jglob)) { toposForAtom[i].push_back(j); topoDist[i].push_back(3); } @@ -220,31 +281,51 @@ namespace OpenMD { } } } - +#endif + + // allocate memory for the parallel objects + atypesLocal.resize(nLocal_); + + for (int i = 0; i < nLocal_; i++) + atypesLocal[i] = ff_->getAtomType(idents[i]); + + groupList_.clear(); + groupList_.resize(nGroups_); + for (int i = 0; i < nGroups_; i++) { + int gid = cgLocalToGlobal[i]; + for (int j = 0; j < nLocal_; j++) { + int aid = AtomLocalToGlobal[j]; + if (globalGroupMembership[aid] == gid) { + groupList_[i].push_back(j); + } + } + } + + createGtypeCutoffMap(); + } void ForceMatrixDecomposition::createGtypeCutoffMap() { - + RealType tol = 1e-6; + largestRcut_ = 0.0; RealType rc; int atid; set atypes = info_->getSimulatedAtomTypes(); - vector atypeCutoff; - atypeCutoff.resize( atypes.size() ); + + map atypeCutoff; for (set::iterator at = atypes.begin(); at != atypes.end(); ++at){ atid = (*at)->getIdent(); - - if (userChoseCutoff_) + if (userChoseCutoff_) atypeCutoff[atid] = userCutoff_; else atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at); } - + vector gTypeCutoffs; - // first we do a single loop over the cutoff groups to find the // largest cutoff for any atypes present in this group. #ifdef IS_MPI @@ -302,22 +383,17 @@ namespace OpenMD { vector groupCutoff(nGroups_, 0.0); groupToGtype.resize(nGroups_); - - cerr << "nGroups = " << nGroups_ << "\n"; for (int cg1 = 0; cg1 < nGroups_; cg1++) { - groupCutoff[cg1] = 0.0; vector atomList = getAtomsInGroupRow(cg1); - for (vector::iterator ia = atomList.begin(); ia != atomList.end(); ++ia) { int atom1 = (*ia); atid = idents[atom1]; - if (atypeCutoff[atid] > groupCutoff[cg1]) { + if (atypeCutoff[atid] > groupCutoff[cg1]) groupCutoff[cg1] = atypeCutoff[atid]; - } } - + bool gTypeFound = false; for (int gt = 0; gt < gTypeCutoffs.size(); gt++) { if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) { @@ -325,20 +401,21 @@ namespace OpenMD { gTypeFound = true; } } - if (!gTypeFound) { + if (!gTypeFound) { gTypeCutoffs.push_back( groupCutoff[cg1] ); groupToGtype[cg1] = gTypeCutoffs.size() - 1; } } #endif - cerr << "gTypeCutoffs.size() = " << gTypeCutoffs.size() << "\n"; // Now we find the maximum group cutoff value present in the simulation - RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end()); + RealType groupMax = *max_element(gTypeCutoffs.begin(), + gTypeCutoffs.end()); #ifdef IS_MPI - MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, MPI::MAX); + MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, + MPI::MAX); #endif RealType tradRcut = groupMax; @@ -368,13 +445,9 @@ namespace OpenMD { pair key = make_pair(i,j); gTypeCutoffMap[key].first = thisRcut; - if (thisRcut > largestRcut_) largestRcut_ = thisRcut; - gTypeCutoffMap[key].second = thisRcut*thisRcut; - gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2); - // sanity check if (userChoseCutoff_) { @@ -391,7 +464,6 @@ namespace OpenMD { } } - groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) { int i, j; #ifdef IS_MPI @@ -415,6 +487,8 @@ namespace 
OpenMD { void ForceMatrixDecomposition::zeroWorkArrays() { pairwisePot = 0.0; embeddingPot = 0.0; + excludedPot = 0.0; + excludedSelfPot = 0.0; #ifdef IS_MPI if (storageLayout_ & DataStorage::dslForce) { @@ -433,9 +507,17 @@ namespace OpenMD { fill(pot_col.begin(), pot_col.end(), Vector (0.0)); + fill(expot_row.begin(), expot_row.end(), + Vector (0.0)); + + fill(expot_col.begin(), expot_col.end(), + Vector (0.0)); + if (storageLayout_ & DataStorage::dslParticlePot) { - fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0); - fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0); + fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), + 0.0); + fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), + 0.0); } if (storageLayout_ & DataStorage::dslDensity) { @@ -444,8 +526,10 @@ namespace OpenMD { } if (storageLayout_ & DataStorage::dslFunctional) { - fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0); - fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0); + fill(atomRowData.functional.begin(), atomRowData.functional.end(), + 0.0); + fill(atomColData.functional.begin(), atomColData.functional.end(), + 0.0); } if (storageLayout_ & DataStorage::dslFunctionalDerivative) { @@ -455,8 +539,37 @@ namespace OpenMD { atomColData.functionalDerivative.end(), 0.0); } -#else - + if (storageLayout_ & DataStorage::dslSkippedCharge) { + fill(atomRowData.skippedCharge.begin(), + atomRowData.skippedCharge.end(), 0.0); + fill(atomColData.skippedCharge.begin(), + atomColData.skippedCharge.end(), 0.0); + } + + if (storageLayout_ & DataStorage::dslFlucQForce) { + fill(atomRowData.flucQFrc.begin(), + atomRowData.flucQFrc.end(), 0.0); + fill(atomColData.flucQFrc.begin(), + atomColData.flucQFrc.end(), 0.0); + } + + if (storageLayout_ & DataStorage::dslElectricField) { + fill(atomRowData.electricField.begin(), + atomRowData.electricField.end(), V3Zero); + fill(atomColData.electricField.begin(), + atomColData.electricField.end(), V3Zero); + } + + if (storageLayout_ & DataStorage::dslFlucQForce) { + fill(atomRowData.flucQFrc.begin(), atomRowData.flucQFrc.end(), + 0.0); + fill(atomColData.flucQFrc.begin(), atomColData.flucQFrc.end(), + 0.0); + } + +#endif + // even in parallel, we need to zero out the local arrays: + if (storageLayout_ & DataStorage::dslParticlePot) { fill(snap_->atomData.particlePot.begin(), snap_->atomData.particlePot.end(), 0.0); @@ -466,16 +579,26 @@ namespace OpenMD { fill(snap_->atomData.density.begin(), snap_->atomData.density.end(), 0.0); } + if (storageLayout_ & DataStorage::dslFunctional) { fill(snap_->atomData.functional.begin(), snap_->atomData.functional.end(), 0.0); } + if (storageLayout_ & DataStorage::dslFunctionalDerivative) { fill(snap_->atomData.functionalDerivative.begin(), snap_->atomData.functionalDerivative.end(), 0.0); } -#endif - + + if (storageLayout_ & DataStorage::dslSkippedCharge) { + fill(snap_->atomData.skippedCharge.begin(), + snap_->atomData.skippedCharge.end(), 0.0); + } + + if (storageLayout_ & DataStorage::dslElectricField) { + fill(snap_->atomData.electricField.begin(), + snap_->atomData.electricField.end(), V3Zero); + } } @@ -485,32 +608,55 @@ namespace OpenMD { #ifdef IS_MPI // gather up the atomic positions - AtomCommVectorRow->gather(snap_->atomData.position, + AtomPlanVectorRow->gather(snap_->atomData.position, atomRowData.position); - AtomCommVectorColumn->gather(snap_->atomData.position, + AtomPlanVectorColumn->gather(snap_->atomData.position, 
atomColData.position); // gather up the cutoff group positions - cgCommVectorRow->gather(snap_->cgData.position, + + cgPlanVectorRow->gather(snap_->cgData.position, cgRowData.position); - cgCommVectorColumn->gather(snap_->cgData.position, + + cgPlanVectorColumn->gather(snap_->cgData.position, cgColData.position); + + + + if (needVelocities_) { + // gather up the atomic velocities + AtomPlanVectorColumn->gather(snap_->atomData.velocity, + atomColData.velocity); + + cgPlanVectorColumn->gather(snap_->cgData.velocity, + cgColData.velocity); + } + // if needed, gather the atomic rotation matrices if (storageLayout_ & DataStorage::dslAmat) { - AtomCommMatrixRow->gather(snap_->atomData.aMat, + AtomPlanMatrixRow->gather(snap_->atomData.aMat, atomRowData.aMat); - AtomCommMatrixColumn->gather(snap_->atomData.aMat, + AtomPlanMatrixColumn->gather(snap_->atomData.aMat, atomColData.aMat); } // if needed, gather the atomic eletrostatic frames if (storageLayout_ & DataStorage::dslElectroFrame) { - AtomCommMatrixRow->gather(snap_->atomData.electroFrame, + AtomPlanMatrixRow->gather(snap_->atomData.electroFrame, atomRowData.electroFrame); - AtomCommMatrixColumn->gather(snap_->atomData.electroFrame, + AtomPlanMatrixColumn->gather(snap_->atomData.electroFrame, atomColData.electroFrame); } + + // if needed, gather the atomic fluctuating charge values + if (storageLayout_ & DataStorage::dslFlucQPosition) { + AtomPlanRealRow->gather(snap_->atomData.flucQPos, + atomRowData.flucQPos); + AtomPlanRealColumn->gather(snap_->atomData.flucQPos, + atomColData.flucQPos); + } + #endif } @@ -524,15 +670,27 @@ namespace OpenMD { if (storageLayout_ & DataStorage::dslDensity) { - AtomCommRealRow->scatter(atomRowData.density, + AtomPlanRealRow->scatter(atomRowData.density, snap_->atomData.density); int n = snap_->atomData.density.size(); vector rho_tmp(n, 0.0); - AtomCommRealColumn->scatter(atomColData.density, rho_tmp); + AtomPlanRealColumn->scatter(atomColData.density, rho_tmp); for (int i = 0; i < n; i++) snap_->atomData.density[i] += rho_tmp[i]; } + + if (storageLayout_ & DataStorage::dslElectricField) { + + AtomPlanVectorRow->scatter(atomRowData.electricField, + snap_->atomData.electricField); + + int n = snap_->atomData.electricField.size(); + vector field_tmp(n, V3Zero); + AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp); + for (int i = 0; i < n; i++) + snap_->atomData.electricField[i] += field_tmp[i]; + } #endif } @@ -545,16 +703,16 @@ namespace OpenMD { storageLayout_ = sman_->getStorageLayout(); #ifdef IS_MPI if (storageLayout_ & DataStorage::dslFunctional) { - AtomCommRealRow->gather(snap_->atomData.functional, + AtomPlanRealRow->gather(snap_->atomData.functional, atomRowData.functional); - AtomCommRealColumn->gather(snap_->atomData.functional, + AtomPlanRealColumn->gather(snap_->atomData.functional, atomColData.functional); } if (storageLayout_ & DataStorage::dslFunctionalDerivative) { - AtomCommRealRow->gather(snap_->atomData.functionalDerivative, + AtomPlanRealRow->gather(snap_->atomData.functionalDerivative, atomRowData.functionalDerivative); - AtomCommRealColumn->gather(snap_->atomData.functionalDerivative, + AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative, atomColData.functionalDerivative); } #endif @@ -568,56 +726,200 @@ namespace OpenMD { int n = snap_->atomData.force.size(); vector frc_tmp(n, V3Zero); - AtomCommVectorRow->scatter(atomRowData.force, frc_tmp); + AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp); for (int i = 0; i < n; i++) { snap_->atomData.force[i] += 
frc_tmp[i]; frc_tmp[i] = 0.0; } - AtomCommVectorColumn->scatter(atomColData.force, frc_tmp); - for (int i = 0; i < n; i++) + AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp); + for (int i = 0; i < n; i++) { snap_->atomData.force[i] += frc_tmp[i]; - - + } + if (storageLayout_ & DataStorage::dslTorque) { - int nt = snap_->atomData.force.size(); + int nt = snap_->atomData.torque.size(); vector trq_tmp(nt, V3Zero); - AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp); - for (int i = 0; i < n; i++) { + AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp); + for (int i = 0; i < nt; i++) { snap_->atomData.torque[i] += trq_tmp[i]; trq_tmp[i] = 0.0; } - AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp); - for (int i = 0; i < n; i++) + AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp); + for (int i = 0; i < nt; i++) snap_->atomData.torque[i] += trq_tmp[i]; + } + + if (storageLayout_ & DataStorage::dslSkippedCharge) { + + int ns = snap_->atomData.skippedCharge.size(); + vector skch_tmp(ns, 0.0); + + AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp); + for (int i = 0; i < ns; i++) { + snap_->atomData.skippedCharge[i] += skch_tmp[i]; + skch_tmp[i] = 0.0; + } + + AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp); + for (int i = 0; i < ns; i++) + snap_->atomData.skippedCharge[i] += skch_tmp[i]; + } + if (storageLayout_ & DataStorage::dslFlucQForce) { + + int nq = snap_->atomData.flucQFrc.size(); + vector fqfrc_tmp(nq, 0.0); + + AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp); + for (int i = 0; i < nq; i++) { + snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; + fqfrc_tmp[i] = 0.0; + } + + AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp); + for (int i = 0; i < nq; i++) + snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; + + } + nLocal_ = snap_->getNumberOfAtoms(); vector pot_temp(nLocal_, Vector (0.0)); + vector expot_temp(nLocal_, + Vector (0.0)); // scatter/gather pot_row into the members of my column - AtomCommPotRow->scatter(pot_row, pot_temp); + AtomPlanPotRow->scatter(pot_row, pot_temp); + AtomPlanPotRow->scatter(expot_row, expot_temp); - for (int ii = 0; ii < pot_temp.size(); ii++ ) + for (int ii = 0; ii < pot_temp.size(); ii++ ) pairwisePot += pot_temp[ii]; - + + for (int ii = 0; ii < expot_temp.size(); ii++ ) + excludedPot += expot_temp[ii]; + + if (storageLayout_ & DataStorage::dslParticlePot) { + // This is the pairwise contribution to the particle pot. The + // embedding contribution is added in each of the low level + // non-bonded routines. In single processor, this is done in + // unpackInteractionData, not in collectData. + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + for (int i = 0; i < nLocal_; i++) { + // factor of two is because the total potential terms are divided + // by 2 in parallel due to row/ column scatter + snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); + } + } + } + fill(pot_temp.begin(), pot_temp.end(), Vector (0.0)); + fill(expot_temp.begin(), expot_temp.end(), + Vector (0.0)); - AtomCommPotColumn->scatter(pot_col, pot_temp); + AtomPlanPotColumn->scatter(pot_col, pot_temp); + AtomPlanPotColumn->scatter(expot_col, expot_temp); for (int ii = 0; ii < pot_temp.size(); ii++ ) pairwisePot += pot_temp[ii]; + + for (int ii = 0; ii < expot_temp.size(); ii++ ) + excludedPot += expot_temp[ii]; + + if (storageLayout_ & DataStorage::dslParticlePot) { + // This is the pairwise contribution to the particle pot. 
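Each of the scatters above follows the same pattern: the row (or column) array holding contributions for every atom in that row is summed across the communicator and split back into per-processor blocks, which are then added into the local snapshot arrays. A sketch of that pattern for a scalar per-atom field such as skippedCharge, assuming raw MPI rather than the actual Plan::scatter:

// Sketch (assumption): reduce the row-wide contributions and keep only
// the block belonging to this processor's own nLocal_ atoms.
#include <mpi.h>
#include <vector>

void scatterAndAccumulate(MPI_Comm row,
                          const std::vector<double>& rowData, // nAtomsInRow_ entries
                          const std::vector<int>& counts,     // nLocal_ of each rank
                          std::vector<double>& local) {       // nLocal_ entries
  std::vector<double> recv(local.size(), 0.0);
  MPI_Reduce_scatter(rowData.data(), recv.data(), counts.data(),
                     MPI_DOUBLE, MPI_SUM, row);
  for (std::size_t i = 0; i < local.size(); i++) local[i] += recv[i];
}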
The + // embedding contribution is added in each of the low level + // non-bonded routines. In single processor, this is done in + // unpackInteractionData, not in collectData. + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + for (int i = 0; i < nLocal_; i++) { + // factor of two is because the total potential terms are divided + // by 2 in parallel due to row/ column scatter + snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); + } + } + } + + if (storageLayout_ & DataStorage::dslParticlePot) { + int npp = snap_->atomData.particlePot.size(); + vector ppot_temp(npp, 0.0); + + // This is the direct or embedding contribution to the particle + // pot. + + AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp); + for (int i = 0; i < npp; i++) { + snap_->atomData.particlePot[i] += ppot_temp[i]; + } + + fill(ppot_temp.begin(), ppot_temp.end(), 0.0); + + AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp); + for (int i = 0; i < npp; i++) { + snap_->atomData.particlePot[i] += ppot_temp[i]; + } + } + + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + RealType ploc1 = pairwisePot[ii]; + RealType ploc2 = 0.0; + MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); + pairwisePot[ii] = ploc2; + } + + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + RealType ploc1 = excludedPot[ii]; + RealType ploc2 = 0.0; + MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); + excludedPot[ii] = ploc2; + } + + // Here be dragons. + MPI::Intracomm col = colComm.getComm(); + + col.Allreduce(MPI::IN_PLACE, + &snap_->frameData.conductiveHeatFlux[0], 3, + MPI::REALTYPE, MPI::SUM); + + #endif } + /** + * Collects information obtained during the post-pair (and embedding + * functional) loops onto local data structures. + */ + void ForceMatrixDecomposition::collectSelfData() { + snap_ = sman_->getCurrentSnapshot(); + storageLayout_ = sman_->getStorageLayout(); + +#ifdef IS_MPI + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + RealType ploc1 = embeddingPot[ii]; + RealType ploc2 = 0.0; + MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); + embeddingPot[ii] = ploc2; + } + for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { + RealType ploc1 = excludedSelfPot[ii]; + RealType ploc2 = 0.0; + MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); + excludedSelfPot[ii] = ploc2; + } +#endif + + } + + + int ForceMatrixDecomposition::getNAtomsInRow() { #ifdef IS_MPI return nAtomsInRow_; @@ -658,7 +960,23 @@ namespace OpenMD { return d; } + Vector3d ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){ +#ifdef IS_MPI + return cgColData.velocity[cg2]; +#else + return snap_->cgData.velocity[cg2]; +#endif + } + Vector3d ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){ +#ifdef IS_MPI + return atomColData.velocity[atom2]; +#else + return snap_->atomData.velocity[atom2]; +#endif + } + + Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){ Vector3d d; @@ -716,43 +1034,67 @@ namespace OpenMD { return d; } - vector ForceMatrixDecomposition::getSkipsForAtom(int atom1) { - return skipsForAtom[atom1]; + vector ForceMatrixDecomposition::getExcludesForAtom(int atom1) { + return excludesForAtom[atom1]; } /** - * There are a number of reasons to skip a pair or a - * particle. 
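The per-family Allreduce loops above copy each entry into a temporary (ploc1/ploc2) before reducing. With MPI's in-place mode, which this file already uses for the conductive heat flux, the same reduction can be written without the copy; a sketch against the same C++ bindings used here:

// Equivalent per-family reduction using MPI::IN_PLACE (assumption:
// written for illustration, not how the patch actually does it).
for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
  RealType p = pairwisePot[ii];
  MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &p, 1, MPI::REALTYPE, MPI::SUM);
  pairwisePot[ii] = p;
}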
Mostly we do this to exclude atoms who are involved in - * short range interactions (bonds, bends, torsions), but we also - * need to exclude some overcounted interactions that result from + * We need to exclude some overcounted interactions that result from * the parallel decomposition. */ - bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) { - int unique_id_1, unique_id_2; - + bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) { + int unique_id_1, unique_id_2, group1, group2; + #ifdef IS_MPI // in MPI, we have to look up the unique IDs for each atom unique_id_1 = AtomRowToGlobal[atom1]; unique_id_2 = AtomColToGlobal[atom2]; + group1 = cgRowToGlobal[cg1]; + group2 = cgColToGlobal[cg2]; +#else + unique_id_1 = AtomLocalToGlobal[atom1]; + unique_id_2 = AtomLocalToGlobal[atom2]; + group1 = cgLocalToGlobal[cg1]; + group2 = cgLocalToGlobal[cg2]; +#endif - // this situation should only arise in MPI simulations if (unique_id_1 == unique_id_2) return true; - + +#ifdef IS_MPI // this prevents us from doing the pair on multiple processors if (unique_id_1 < unique_id_2) { if ((unique_id_1 + unique_id_2) % 2 == 0) return true; } else { - if ((unique_id_1 + unique_id_2) % 2 == 1) return true; + if ((unique_id_1 + unique_id_2) % 2 == 1) return true; + } +#endif + +#ifndef IS_MPI + if (group1 == group2) { + if (unique_id_1 < unique_id_2) return true; } -#else - // in the normal loop, the atom numbers are unique - unique_id_1 = atom1; - unique_id_2 = atom2; #endif - for (vector::iterator i = skipsForAtom[atom1].begin(); - i != skipsForAtom[atom1].end(); ++i) { - if ( (*i) == unique_id_2 ) return true; + return false; + } + + /** + * We need to handle the interactions for atoms who are involved in + * the same rigid body as well as some short range interactions + * (bonds, bends, torsions) differently from other interactions. + * We'll still visit the pairwise routines, but with a flag that + * tells those routines to exclude the pair from direct long range + * interactions. Some indirect interactions (notably reaction + * field) must still be handled for these pairs. 
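The parity test in skipAtomPair above is what divides the pair work between processors: a given global pair appears on one block as (row, column) and on another block with the roles swapped, and the test accepts exactly one of those two orderings. A standalone restatement of the rule (hypothetical helper, for illustration only):

// Returns true when this block should compute the pair.  For id1 != id2
// the parity of (id1 + id2) selects exactly one of the two (row, column)
// orderings in which the pair appears, so no pair is double counted.
bool pairComputedHere(int id1, int id2) {
  if (id1 == id2) return false;                  // self pair is always skipped
  if (id1 < id2)  return (id1 + id2) % 2 == 1;   // odd sums:  lower id on the row side
  else            return (id1 + id2) % 2 == 0;   // even sums: higher id on the row side
}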
+ */ + bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) { + + // excludesForAtom was constructed to use row/column indices in the MPI + // version, and to use local IDs in the non-MPI version: + + for (vector::iterator i = excludesForAtom[atom1].begin(); + i != excludesForAtom[atom1].end(); ++i) { + if ( (*i) == atom2 ) return true; } return false; @@ -777,12 +1119,15 @@ namespace OpenMD { // filling interaction blocks with pointers void ForceMatrixDecomposition::fillInteractionData(InteractionData &idat, - int atom1, int atom2) { + int atom1, int atom2) { + + idat.excluded = excludeAtomPair(atom1, atom2); + #ifdef IS_MPI + idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]); + //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]), + // ff_->getAtomType(identsCol[atom2]) ); - idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]), - ff_->getAtomType(identsCol[atom2]) ); - if (storageLayout_ & DataStorage::dslAmat) { idat.A1 = &(atomRowData.aMat[atom1]); idat.A2 = &(atomColData.aMat[atom2]); @@ -818,11 +1163,20 @@ namespace OpenMD { idat.particlePot2 = &(atomColData.particlePot[atom2]); } -#else + if (storageLayout_ & DataStorage::dslSkippedCharge) { + idat.skippedCharge1 = &(atomRowData.skippedCharge[atom1]); + idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]); + } - idat.atypes = make_pair( ff_->getAtomType(idents[atom1]), - ff_->getAtomType(idents[atom2]) ); + if (storageLayout_ & DataStorage::dslFlucQPosition) { + idat.flucQ1 = &(atomRowData.flucQPos[atom1]); + idat.flucQ2 = &(atomColData.flucQPos[atom2]); + } +#else + + idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]); + if (storageLayout_ & DataStorage::dslAmat) { idat.A1 = &(snap_->atomData.aMat[atom1]); idat.A2 = &(snap_->atomData.aMat[atom2]); @@ -858,69 +1212,70 @@ namespace OpenMD { idat.particlePot2 = &(snap_->atomData.particlePot[atom2]); } + if (storageLayout_ & DataStorage::dslSkippedCharge) { + idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]); + idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]); + } + + if (storageLayout_ & DataStorage::dslFlucQPosition) { + idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]); + idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]); + } + #endif } void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) { #ifdef IS_MPI - pot_row[atom1] += 0.5 * *(idat.pot); - pot_col[atom2] += 0.5 * *(idat.pot); + pot_row[atom1] += RealType(0.5) * *(idat.pot); + pot_col[atom2] += RealType(0.5) * *(idat.pot); + expot_row[atom1] += RealType(0.5) * *(idat.excludedPot); + expot_col[atom2] += RealType(0.5) * *(idat.excludedPot); atomRowData.force[atom1] += *(idat.f1); atomColData.force[atom2] -= *(idat.f1); + + if (storageLayout_ & DataStorage::dslFlucQForce) { + atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1); + atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2); + } + + if (storageLayout_ & DataStorage::dslElectricField) { + atomRowData.electricField[atom1] += *(idat.eField1); + atomColData.electricField[atom2] += *(idat.eField2); + } + #else pairwisePot += *(idat.pot); + excludedPot += *(idat.excludedPot); snap_->atomData.force[atom1] += *(idat.f1); snap_->atomData.force[atom2] -= *(idat.f1); -#endif - } - - - void ForceMatrixDecomposition::fillSkipData(InteractionData &idat, - int atom1, int atom2) { - // Still Missing:: skippedCharge fill must be added to DataStorage -#ifdef IS_MPI - idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]), - ff_->getAtomType(identsCol[atom2]) ); - - if 
(storageLayout_ & DataStorage::dslElectroFrame) { - idat.eFrame1 = &(atomRowData.electroFrame[atom1]); - idat.eFrame2 = &(atomColData.electroFrame[atom2]); + if (idat.doParticlePot) { + // This is the pairwise contribution to the particle pot. The + // embedding contribution is added in each of the low level + // non-bonded routines. In parallel, this calculation is done + // in collectData, not in unpackInteractionData. + snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw); + snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw); } - if (storageLayout_ & DataStorage::dslTorque) { - idat.t1 = &(atomRowData.torque[atom1]); - idat.t2 = &(atomColData.torque[atom2]); + + if (storageLayout_ & DataStorage::dslFlucQForce) { + snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1); + snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2); } -#else - idat.atypes = make_pair( ff_->getAtomType(idents[atom1]), - ff_->getAtomType(idents[atom2]) ); - if (storageLayout_ & DataStorage::dslElectroFrame) { - idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]); - idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]); + if (storageLayout_ & DataStorage::dslElectricField) { + snap_->atomData.electricField[atom1] += *(idat.eField1); + snap_->atomData.electricField[atom2] += *(idat.eField2); } - if (storageLayout_ & DataStorage::dslTorque) { - idat.t1 = &(snap_->atomData.torque[atom1]); - idat.t2 = &(snap_->atomData.torque[atom2]); - } -#endif - } - - void ForceMatrixDecomposition::unpackSkipData(InteractionData &idat, int atom1, int atom2) { -#ifdef IS_MPI - pot_row[atom1] += 0.5 * *(idat.pot); - pot_col[atom2] += 0.5 * *(idat.pot); -#else - pairwisePot += *(idat.pot); #endif - + } - /* * buildNeighborList * @@ -931,6 +1286,8 @@ namespace OpenMD { vector > neighborList; groupCutoffs cuts; + bool doAllPairs = false; + #ifdef IS_MPI cellListRow_.clear(); cellListCol_.clear(); @@ -950,6 +1307,12 @@ namespace OpenMD { nCells_.y() = (int) ( Hy.length() )/ rList_; nCells_.z() = (int) ( Hz.length() )/ rList_; + // handle small boxes where the cell offsets can end up repeating cells + + if (nCells_.x() < 3) doAllPairs = true; + if (nCells_.y() < 3) doAllPairs = true; + if (nCells_.z() < 3) doAllPairs = true; + Mat3x3d invHmat = snap_->getInvHmat(); Vector3d rs, scaled, dr; Vector3i whichCell; @@ -963,167 +1326,203 @@ namespace OpenMD { cellList_.resize(nCtot); #endif + if (!doAllPairs) { #ifdef IS_MPI - for (int i = 0; i < nGroupsInRow_; i++) { - rs = cgRowData.position[i]; - // scaled positions relative to the box vectors - scaled = invHmat * rs; - - // wrap the vector back into the unit box by subtracting integer box - // numbers - for (int j = 0; j < 3; j++) { - scaled[j] -= roundMe(scaled[j]); - scaled[j] += 0.5; + for (int i = 0; i < nGroupsInRow_; i++) { + rs = cgRowData.position[i]; + + // scaled positions relative to the box vectors + scaled = invHmat * rs; + + // wrap the vector back into the unit box by subtracting integer box + // numbers + for (int j = 0; j < 3; j++) { + scaled[j] -= roundMe(scaled[j]); + scaled[j] += 0.5; + } + + // find xyz-indices of cell that cutoffGroup is in. 
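The binning loop above converts each cutoff-group position to fractional coordinates, wraps them into [0,1), and scales by the number of cells along each axis. A compact sketch of the same mapping using the quantities from this loop (a hypothetical helper, with an extra guard, not in the patch, for a coordinate that lands exactly on the upper edge):

// Sketch: map a cutoff-group position to its 3-d cell index.
Vector3i cellOf(const Vector3d& rs, const Mat3x3d& invHmat,
                const Vector3i& nCells) {
  Vector3d scaled = invHmat * rs;        // fractional (box) coordinates
  Vector3i cell;
  for (int j = 0; j < 3; j++) {
    scaled[j] -= roundMe(scaled[j]);     // wrap into [-0.5, 0.5)
    scaled[j] += 0.5;                    // shift into [0, 1)
    cell[j] = (int) (nCells[j] * scaled[j]);
    if (cell[j] == nCells[j]) cell[j] = nCells[j] - 1;  // guard the upper edge
  }
  return cell;
}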
+ whichCell.x() = nCells_.x() * scaled.x(); + whichCell.y() = nCells_.y() * scaled.y(); + whichCell.z() = nCells_.z() * scaled.z(); + + // find single index of this cell: + cellIndex = Vlinear(whichCell, nCells_); + + // add this cutoff group to the list of groups in this cell; + cellListRow_[cellIndex].push_back(i); + } + for (int i = 0; i < nGroupsInCol_; i++) { + rs = cgColData.position[i]; + + // scaled positions relative to the box vectors + scaled = invHmat * rs; + + // wrap the vector back into the unit box by subtracting integer box + // numbers + for (int j = 0; j < 3; j++) { + scaled[j] -= roundMe(scaled[j]); + scaled[j] += 0.5; + } + + // find xyz-indices of cell that cutoffGroup is in. + whichCell.x() = nCells_.x() * scaled.x(); + whichCell.y() = nCells_.y() * scaled.y(); + whichCell.z() = nCells_.z() * scaled.z(); + + // find single index of this cell: + cellIndex = Vlinear(whichCell, nCells_); + + // add this cutoff group to the list of groups in this cell; + cellListCol_[cellIndex].push_back(i); } - // find xyz-indices of cell that cutoffGroup is in. - whichCell.x() = nCells_.x() * scaled.x(); - whichCell.y() = nCells_.y() * scaled.y(); - whichCell.z() = nCells_.z() * scaled.z(); - - // find single index of this cell: - cellIndex = Vlinear(whichCell, nCells_); - - // add this cutoff group to the list of groups in this cell; - cellListRow_[cellIndex].push_back(i); - } - - for (int i = 0; i < nGroupsInCol_; i++) { - rs = cgColData.position[i]; - - // scaled positions relative to the box vectors - scaled = invHmat * rs; - - // wrap the vector back into the unit box by subtracting integer box - // numbers - for (int j = 0; j < 3; j++) { - scaled[j] -= roundMe(scaled[j]); - scaled[j] += 0.5; - } - - // find xyz-indices of cell that cutoffGroup is in. - whichCell.x() = nCells_.x() * scaled.x(); - whichCell.y() = nCells_.y() * scaled.y(); - whichCell.z() = nCells_.z() * scaled.z(); - - // find single index of this cell: - cellIndex = Vlinear(whichCell, nCells_); - - // add this cutoff group to the list of groups in this cell; - cellListCol_[cellIndex].push_back(i); - } #else - for (int i = 0; i < nGroups_; i++) { - rs = snap_->cgData.position[i]; - - // scaled positions relative to the box vectors - scaled = invHmat * rs; - - // wrap the vector back into the unit box by subtracting integer box - // numbers - for (int j = 0; j < 3; j++) { - scaled[j] -= roundMe(scaled[j]); - scaled[j] += 0.5; + for (int i = 0; i < nGroups_; i++) { + rs = snap_->cgData.position[i]; + + // scaled positions relative to the box vectors + scaled = invHmat * rs; + + // wrap the vector back into the unit box by subtracting integer box + // numbers + for (int j = 0; j < 3; j++) { + scaled[j] -= roundMe(scaled[j]); + scaled[j] += 0.5; + } + + // find xyz-indices of cell that cutoffGroup is in. + whichCell.x() = nCells_.x() * scaled.x(); + whichCell.y() = nCells_.y() * scaled.y(); + whichCell.z() = nCells_.z() * scaled.z(); + + // find single index of this cell: + cellIndex = Vlinear(whichCell, nCells_); + + // add this cutoff group to the list of groups in this cell; + cellList_[cellIndex].push_back(i); } - // find xyz-indices of cell that cutoffGroup is in. 
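Vlinear collapses the three cell indices into the single index used to address cellListRow_, cellListCol_, and cellList_. One common row-major convention is sketched below; this is an assumption for illustration, and the actual ordering is whatever OpenMD's Vlinear defines:

// Sketch (assumed convention): x varies fastest, then y, then z.
int linearCellIndex(const Vector3i& cell, const Vector3i& nCells) {
  return cell.x() + nCells.x() * (cell.y() + nCells.y() * cell.z());
}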
- whichCell.x() = nCells_.x() * scaled.x(); - whichCell.y() = nCells_.y() * scaled.y(); - whichCell.z() = nCells_.z() * scaled.z(); - - // find single index of this cell: - cellIndex = Vlinear(whichCell, nCells_); - - // add this cutoff group to the list of groups in this cell; - cellList_[cellIndex].push_back(i); - } #endif - for (int m1z = 0; m1z < nCells_.z(); m1z++) { - for (int m1y = 0; m1y < nCells_.y(); m1y++) { - for (int m1x = 0; m1x < nCells_.x(); m1x++) { - Vector3i m1v(m1x, m1y, m1z); - int m1 = Vlinear(m1v, nCells_); - - for (vector::iterator os = cellOffsets_.begin(); - os != cellOffsets_.end(); ++os) { + for (int m1z = 0; m1z < nCells_.z(); m1z++) { + for (int m1y = 0; m1y < nCells_.y(); m1y++) { + for (int m1x = 0; m1x < nCells_.x(); m1x++) { + Vector3i m1v(m1x, m1y, m1z); + int m1 = Vlinear(m1v, nCells_); - Vector3i m2v = m1v + (*os); - - if (m2v.x() >= nCells_.x()) { - m2v.x() = 0; - } else if (m2v.x() < 0) { - m2v.x() = nCells_.x() - 1; - } - - if (m2v.y() >= nCells_.y()) { - m2v.y() = 0; - } else if (m2v.y() < 0) { - m2v.y() = nCells_.y() - 1; - } - - if (m2v.z() >= nCells_.z()) { - m2v.z() = 0; - } else if (m2v.z() < 0) { - m2v.z() = nCells_.z() - 1; - } - - int m2 = Vlinear (m2v, nCells_); + for (vector::iterator os = cellOffsets_.begin(); + os != cellOffsets_.end(); ++os) { + + Vector3i m2v = m1v + (*os); + -#ifdef IS_MPI - for (vector::iterator j1 = cellListRow_[m1].begin(); - j1 != cellListRow_[m1].end(); ++j1) { - for (vector::iterator j2 = cellListCol_[m2].begin(); - j2 != cellListCol_[m2].end(); ++j2) { - - // Always do this if we're in different cells or if - // we're in the same cell and the global index of the - // j2 cutoff group is less than the j1 cutoff group + if (m2v.x() >= nCells_.x()) { + m2v.x() = 0; + } else if (m2v.x() < 0) { + m2v.x() = nCells_.x() - 1; + } + + if (m2v.y() >= nCells_.y()) { + m2v.y() = 0; + } else if (m2v.y() < 0) { + m2v.y() = nCells_.y() - 1; + } + + if (m2v.z() >= nCells_.z()) { + m2v.z() = 0; + } else if (m2v.z() < 0) { + m2v.z() = nCells_.z() - 1; + } - if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) { + int m2 = Vlinear (m2v, nCells_); + +#ifdef IS_MPI + for (vector::iterator j1 = cellListRow_[m1].begin(); + j1 != cellListRow_[m1].end(); ++j1) { + for (vector::iterator j2 = cellListCol_[m2].begin(); + j2 != cellListCol_[m2].end(); ++j2) { + + // In parallel, we need to visit *all* pairs of row + // & column indicies and will divide labor in the + // force evaluation later. dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)]; snap_->wrapVector(dr); cuts = getGroupCutoffs( (*j1), (*j2) ); if (dr.lengthSquare() < cuts.third) { neighborList.push_back(make_pair((*j1), (*j2))); - } + } } } - } #else + for (vector::iterator j1 = cellList_[m1].begin(); + j1 != cellList_[m1].end(); ++j1) { + for (vector::iterator j2 = cellList_[m2].begin(); + j2 != cellList_[m2].end(); ++j2) { + + // Always do this if we're in different cells or if + // we're in the same cell and the global index of + // the j2 cutoff group is greater than or equal to + // the j1 cutoff group. Note that Rappaport's code + // has a "less than" conditional here, but that + // deals with atom-by-atom computation. OpenMD + // allows atoms within a single cutoff group to + // interact with each other. 
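The if/else chains above wrap the offset cell back into the box one step at a time, which works because every entry in cellOffsets_ moves at most one cell along each axis; when the box is thinner than three cells the ±1 offsets start revisiting cells already in the list, which is exactly the situation the doAllPairs fallback handles. The equivalent periodic wrap, as a hypothetical helper not in this patch:

// Wrap a neighbor-cell index into [0, nCells), assuming offsets of at
// most one cell in each direction.
int wrapCellIndex(int m, int nCells) {
  return (m % nCells + nCells) % nCells;
}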
- for (vector::iterator j1 = cellList_[m1].begin(); - j1 != cellList_[m1].end(); ++j1) { - for (vector::iterator j2 = cellList_[m2].begin(); - j2 != cellList_[m2].end(); ++j2) { - // Always do this if we're in different cells or if - // we're in the same cell and the global index of the - // j2 cutoff group is less than the j1 cutoff group - if (m2 != m1 || (*j2) < (*j1)) { - dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)]; - snap_->wrapVector(dr); - cuts = getGroupCutoffs( (*j1), (*j2) ); - if (dr.lengthSquare() < cuts.third) { - neighborList.push_back(make_pair((*j1), (*j2))); + if (m2 != m1 || (*j2) >= (*j1) ) { + + dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)]; + snap_->wrapVector(dr); + cuts = getGroupCutoffs( (*j1), (*j2) ); + if (dr.lengthSquare() < cuts.third) { + neighborList.push_back(make_pair((*j1), (*j2))); + } } } } - } #endif + } } } } + } else { + // branch to do all cutoff group pairs +#ifdef IS_MPI + for (int j1 = 0; j1 < nGroupsInRow_; j1++) { + for (int j2 = 0; j2 < nGroupsInCol_; j2++) { + dr = cgColData.position[j2] - cgRowData.position[j1]; + snap_->wrapVector(dr); + cuts = getGroupCutoffs( j1, j2 ); + if (dr.lengthSquare() < cuts.third) { + neighborList.push_back(make_pair(j1, j2)); + } + } + } +#else + // include all groups here. + for (int j1 = 0; j1 < nGroups_; j1++) { + // include self group interactions j2 == j1 + for (int j2 = j1; j2 < nGroups_; j2++) { + dr = snap_->cgData.position[j2] - snap_->cgData.position[j1]; + snap_->wrapVector(dr); + cuts = getGroupCutoffs( j1, j2 ); + if (dr.lengthSquare() < cuts.third) { + neighborList.push_back(make_pair(j1, j2)); + } + } + } +#endif } - + // save the local cutoff group positions for the check that is // done on each loop: saved_CG_positions_.clear(); for (int i = 0; i < nGroups_; i++) saved_CG_positions_.push_back(snap_->cgData.position[i]); - + return neighborList; } } //end namespace OpenMD
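The cutoff-group positions saved at the end are compared against the current positions on each loop to decide when the neighbor list must be rebuilt. A typical displacement test of that kind is sketched below; this illustrates the idea under a common half-skin criterion and is not the routine OpenMD actually uses:

// Sketch (assumption): rebuild once any cutoff group has moved more than
// half the skin thickness since the list was last built.
bool displacementTooLarge(const vector<Vector3d>& current,
                          const vector<Vector3d>& saved,
                          RealType skinThickness) {
  RealType maxDisp = 0.0;
  for (unsigned int i = 0; i < current.size(); i++) {
    RealType disp = (current[i] - saved[i]).length();
    if (disp > maxDisp) maxDisp = disp;
  }
  return (maxDisp > 0.5 * skinThickness);
}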