| 36 |  | * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005). | 
| 37 |  | * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006). | 
| 38 |  | * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008). | 
| 39 | < | * [4]  Vardeman & Gezelter, in progress (2009). | 
| 39 | > | * [4]  Kuang & Gezelter,  J. Chem. Phys. 133, 164101 (2010). | 
| 40 | > | * [5]  Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011). | 
| 41 |  | */ | 
| 42 |  | #include "parallel/ForceMatrixDecomposition.hpp" | 
| 43 |  | #include "math/SquareMatrix3.hpp" | 
| 48 |  | using namespace std; | 
| 49 |  | namespace OpenMD { | 
| 50 |  |  | 
| 51 | + | ForceMatrixDecomposition::ForceMatrixDecomposition(SimInfo* info, InteractionManager* iMan) : ForceDecomposition(info, iMan) { | 
| 52 | + |  | 
| 53 | + | // In a parallel computation, row and column scans must visit all | 
| 54 | + | // surrounding cells (not just the 14 upper triangular blocks that | 
| 55 | + | // are used when the processor can see all pairs) | 
| 56 | + | #ifdef IS_MPI | 
| 57 | + | cellOffsets_.clear(); | 
| 58 | + | cellOffsets_.push_back( Vector3i(-1,-1,-1) ); | 
| 59 | + | cellOffsets_.push_back( Vector3i( 0,-1,-1) ); | 
| 60 | + | cellOffsets_.push_back( Vector3i( 1,-1,-1) ); | 
| 61 | + | cellOffsets_.push_back( Vector3i(-1, 0,-1) ); | 
| 62 | + | cellOffsets_.push_back( Vector3i( 0, 0,-1) ); | 
| 63 | + | cellOffsets_.push_back( Vector3i( 1, 0,-1) ); | 
| 64 | + | cellOffsets_.push_back( Vector3i(-1, 1,-1) ); | 
| 65 | + | cellOffsets_.push_back( Vector3i( 0, 1,-1) ); | 
| 66 | + | cellOffsets_.push_back( Vector3i( 1, 1,-1) ); | 
| 67 | + | cellOffsets_.push_back( Vector3i(-1,-1, 0) ); | 
| 68 | + | cellOffsets_.push_back( Vector3i( 0,-1, 0) ); | 
| 69 | + | cellOffsets_.push_back( Vector3i( 1,-1, 0) ); | 
| 70 | + | cellOffsets_.push_back( Vector3i(-1, 0, 0) ); | 
| 71 | + | cellOffsets_.push_back( Vector3i( 0, 0, 0) ); | 
| 72 | + | cellOffsets_.push_back( Vector3i( 1, 0, 0) ); | 
| 73 | + | cellOffsets_.push_back( Vector3i(-1, 1, 0) ); | 
| 74 | + | cellOffsets_.push_back( Vector3i( 0, 1, 0) ); | 
| 75 | + | cellOffsets_.push_back( Vector3i( 1, 1, 0) ); | 
| 76 | + | cellOffsets_.push_back( Vector3i(-1,-1, 1) ); | 
| 77 | + | cellOffsets_.push_back( Vector3i( 0,-1, 1) ); | 
| 78 | + | cellOffsets_.push_back( Vector3i( 1,-1, 1) ); | 
| 79 | + | cellOffsets_.push_back( Vector3i(-1, 0, 1) ); | 
| 80 | + | cellOffsets_.push_back( Vector3i( 0, 0, 1) ); | 
| 81 | + | cellOffsets_.push_back( Vector3i( 1, 0, 1) ); | 
| 82 | + | cellOffsets_.push_back( Vector3i(-1, 1, 1) ); | 
| 83 | + | cellOffsets_.push_back( Vector3i( 0, 1, 1) ); | 
| 84 | + | cellOffsets_.push_back( Vector3i( 1, 1, 1) ); | 
| 85 | + | #endif | 
| 86 | + | } | 
| 87 | + |  | 
| 88 | + |  | 
| 89 |  | /** | 
| 90 |  | * distributeInitialData is essentially a copy of the older Fortran | 
| 91 |  | * SimulationSetup | 
| 92 |  | */ | 
| 54 | – |  | 
| 93 |  | void ForceMatrixDecomposition::distributeInitialData() { | 
| 94 |  | snap_ = sman_->getCurrentSnapshot(); | 
| 95 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 96 |  | ff_ = info_->getForceField(); | 
| 97 |  | nLocal_ = snap_->getNumberOfAtoms(); | 
| 98 | < |  | 
| 98 | > |  | 
| 99 |  | nGroups_ = info_->getNLocalCutoffGroups(); | 
| 100 |  | // gather the information for atomtype IDs (atids): | 
| 101 |  | idents = info_->getIdentArray(); | 
| 109 |  | PairList* oneTwo = info_->getOneTwoInteractions(); | 
| 110 |  | PairList* oneThree = info_->getOneThreeInteractions(); | 
| 111 |  | PairList* oneFour = info_->getOneFourInteractions(); | 
| 112 | < |  | 
| 112 | > |  | 
| 113 | > | if (needVelocities_) | 
| 114 | > | snap_->cgData.setStorageLayout(DataStorage::dslPosition | | 
| 115 | > | DataStorage::dslVelocity); | 
| 116 | > | else | 
| 117 | > | snap_->cgData.setStorageLayout(DataStorage::dslPosition); | 
| 118 | > |  | 
| 119 |  | #ifdef IS_MPI | 
| 120 |  |  | 
| 121 | < | AtomCommIntRow = new Communicator<Row,int>(nLocal_); | 
| 122 | < | AtomCommRealRow = new Communicator<Row,RealType>(nLocal_); | 
| 79 | < | AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal_); | 
| 80 | < | AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal_); | 
| 81 | < | AtomCommPotRow = new Communicator<Row,potVec>(nLocal_); | 
| 121 | > | MPI::Intracomm row = rowComm.getComm(); | 
| 122 | > | MPI::Intracomm col = colComm.getComm(); | 
| 123 |  |  | 
| 124 | < | AtomCommIntColumn = new Communicator<Column,int>(nLocal_); | 
| 125 | < | AtomCommRealColumn = new Communicator<Column,RealType>(nLocal_); | 
| 126 | < | AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal_); | 
| 127 | < | AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal_); | 
| 128 | < | AtomCommPotColumn = new Communicator<Column,potVec>(nLocal_); | 
| 124 | > | AtomPlanIntRow = new Plan<int>(row, nLocal_); | 
| 125 | > | AtomPlanRealRow = new Plan<RealType>(row, nLocal_); | 
| 126 | > | AtomPlanVectorRow = new Plan<Vector3d>(row, nLocal_); | 
| 127 | > | AtomPlanMatrixRow = new Plan<Mat3x3d>(row, nLocal_); | 
| 128 | > | AtomPlanPotRow = new Plan<potVec>(row, nLocal_); | 
| 129 |  |  | 
| 130 | < | cgCommIntRow = new Communicator<Row,int>(nGroups_); | 
| 131 | < | cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups_); | 
| 132 | < | cgCommIntColumn = new Communicator<Column,int>(nGroups_); | 
| 133 | < | cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups_); | 
| 130 | > | AtomPlanIntColumn = new Plan<int>(col, nLocal_); | 
| 131 | > | AtomPlanRealColumn = new Plan<RealType>(col, nLocal_); | 
| 132 | > | AtomPlanVectorColumn = new Plan<Vector3d>(col, nLocal_); | 
| 133 | > | AtomPlanMatrixColumn = new Plan<Mat3x3d>(col, nLocal_); | 
| 134 | > | AtomPlanPotColumn = new Plan<potVec>(col, nLocal_); | 
| 135 |  |  | 
| 136 | < | nAtomsInRow_ = AtomCommIntRow->getSize(); | 
| 137 | < | nAtomsInCol_ = AtomCommIntColumn->getSize(); | 
| 138 | < | nGroupsInRow_ = cgCommIntRow->getSize(); | 
| 139 | < | nGroupsInCol_ = cgCommIntColumn->getSize(); | 
| 136 | > | cgPlanIntRow = new Plan<int>(row, nGroups_); | 
| 137 | > | cgPlanVectorRow = new Plan<Vector3d>(row, nGroups_); | 
| 138 | > | cgPlanIntColumn = new Plan<int>(col, nGroups_); | 
| 139 | > | cgPlanVectorColumn = new Plan<Vector3d>(col, nGroups_); | 
| 140 | > |  | 
| 141 | > | nAtomsInRow_ = AtomPlanIntRow->getSize(); | 
| 142 | > | nAtomsInCol_ = AtomPlanIntColumn->getSize(); | 
| 143 | > | nGroupsInRow_ = cgPlanIntRow->getSize(); | 
| 144 | > | nGroupsInCol_ = cgPlanIntColumn->getSize(); | 
| 145 |  |  | 
| 146 |  | // Modify the data storage objects with the correct layouts and sizes: | 
| 147 |  | atomRowData.resize(nAtomsInRow_); | 
| 151 |  | cgRowData.resize(nGroupsInRow_); | 
| 152 |  | cgRowData.setStorageLayout(DataStorage::dslPosition); | 
| 153 |  | cgColData.resize(nGroupsInCol_); | 
| 154 | < | cgColData.setStorageLayout(DataStorage::dslPosition); | 
| 155 | < |  | 
| 154 | > | if (needVelocities_) | 
| 155 | > | // only store column velocities when velocities are required. | 
| 156 | > | cgColData.setStorageLayout(DataStorage::dslPosition | | 
| 157 | > | DataStorage::dslVelocity); | 
| 158 | > | else | 
| 159 | > | cgColData.setStorageLayout(DataStorage::dslPosition); | 
| 160 | > |  | 
| 161 |  | identsRow.resize(nAtomsInRow_); | 
| 162 |  | identsCol.resize(nAtomsInCol_); | 
| 163 |  |  | 
| 164 | < | AtomCommIntRow->gather(idents, identsRow); | 
| 165 | < | AtomCommIntColumn->gather(idents, identsCol); | 
| 164 | > | AtomPlanIntRow->gather(idents, identsRow); | 
| 165 | > | AtomPlanIntColumn->gather(idents, identsCol); | 
| 166 |  |  | 
| 167 |  | // allocate memory for the parallel objects | 
| 168 | + | atypesRow.resize(nAtomsInRow_); | 
| 169 | + | atypesCol.resize(nAtomsInCol_); | 
| 170 | + |  | 
| 171 | + | for (int i = 0; i < nAtomsInRow_; i++) | 
| 172 | + | atypesRow[i] = ff_->getAtomType(identsRow[i]); | 
| 173 | + | for (int i = 0; i < nAtomsInCol_; i++) | 
| 174 | + | atypesCol[i] = ff_->getAtomType(identsCol[i]); | 
| 175 | + |  | 
| 176 | + | pot_row.resize(nAtomsInRow_); | 
| 177 | + | pot_col.resize(nAtomsInCol_); | 
| 178 | + |  | 
| 179 | + | expot_row.resize(nAtomsInRow_); | 
| 180 | + | expot_col.resize(nAtomsInCol_); | 
| 181 | + |  | 
| 182 |  | AtomRowToGlobal.resize(nAtomsInRow_); | 
| 183 |  | AtomColToGlobal.resize(nAtomsInCol_); | 
| 184 | + | AtomPlanIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); | 
| 185 | + | AtomPlanIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); | 
| 186 | + |  | 
| 187 |  | cgRowToGlobal.resize(nGroupsInRow_); | 
| 188 |  | cgColToGlobal.resize(nGroupsInCol_); | 
| 189 | + | cgPlanIntRow->gather(cgLocalToGlobal, cgRowToGlobal); | 
| 190 | + | cgPlanIntColumn->gather(cgLocalToGlobal, cgColToGlobal); | 
| 191 | + |  | 
| 192 |  | massFactorsRow.resize(nAtomsInRow_); | 
| 193 |  | massFactorsCol.resize(nAtomsInCol_); | 
| 194 | < | pot_row.resize(nAtomsInRow_); | 
| 195 | < | pot_col.resize(nAtomsInCol_); | 
| 194 | > | AtomPlanRealRow->gather(massFactors, massFactorsRow); | 
| 195 | > | AtomPlanRealColumn->gather(massFactors, massFactorsCol); | 
| 196 |  |  | 
| 125 | – | AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); | 
| 126 | – | AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); | 
| 127 | – |  | 
| 128 | – | cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal); | 
| 129 | – | cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal); | 
| 130 | – |  | 
| 131 | – | AtomCommRealRow->gather(massFactors, massFactorsRow); | 
| 132 | – | AtomCommRealColumn->gather(massFactors, massFactorsCol); | 
| 133 | – |  | 
| 197 |  | groupListRow_.clear(); | 
| 198 |  | groupListRow_.resize(nGroupsInRow_); | 
| 199 |  | for (int i = 0; i < nGroupsInRow_; i++) { | 
| 248 |  | } | 
| 249 |  | } | 
| 250 |  |  | 
| 251 | < | #endif | 
| 189 | < |  | 
| 190 | < | groupList_.clear(); | 
| 191 | < | groupList_.resize(nGroups_); | 
| 192 | < | for (int i = 0; i < nGroups_; i++) { | 
| 193 | < | int gid = cgLocalToGlobal[i]; | 
| 194 | < | for (int j = 0; j < nLocal_; j++) { | 
| 195 | < | int aid = AtomLocalToGlobal[j]; | 
| 196 | < | if (globalGroupMembership[aid] == gid) { | 
| 197 | < | groupList_[i].push_back(j); | 
| 198 | < | } | 
| 199 | < | } | 
| 200 | < | } | 
| 201 | < |  | 
| 251 | > | #else | 
| 252 |  | excludesForAtom.clear(); | 
| 253 |  | excludesForAtom.resize(nLocal_); | 
| 254 |  | toposForAtom.clear(); | 
| 281 |  | } | 
| 282 |  | } | 
| 283 |  | } | 
| 284 | < |  | 
| 284 | > | #endif | 
| 285 | > |  | 
| 286 | > | // allocate memory for the parallel objects | 
| 287 | > | atypesLocal.resize(nLocal_); | 
| 288 | > |  | 
| 289 | > | for (int i = 0; i < nLocal_; i++) | 
| 290 | > | atypesLocal[i] = ff_->getAtomType(idents[i]); | 
| 291 | > |  | 
| 292 | > | groupList_.clear(); | 
| 293 | > | groupList_.resize(nGroups_); | 
| 294 | > | for (int i = 0; i < nGroups_; i++) { | 
| 295 | > | int gid = cgLocalToGlobal[i]; | 
| 296 | > | for (int j = 0; j < nLocal_; j++) { | 
| 297 | > | int aid = AtomLocalToGlobal[j]; | 
| 298 | > | if (globalGroupMembership[aid] == gid) { | 
| 299 | > | groupList_[i].push_back(j); | 
| 300 | > | } | 
| 301 | > | } | 
| 302 | > | } | 
| 303 | > |  | 
| 304 | > |  | 
| 305 |  | createGtypeCutoffMap(); | 
| 306 |  |  | 
| 307 |  | } | 
| 309 |  | void ForceMatrixDecomposition::createGtypeCutoffMap() { | 
| 310 |  |  | 
| 311 |  | RealType tol = 1e-6; | 
| 312 | + | largestRcut_ = 0.0; | 
| 313 |  | RealType rc; | 
| 314 |  | int atid; | 
| 315 |  | set<AtomType*> atypes = info_->getSimulatedAtomTypes(); | 
| 316 | + |  | 
| 317 |  | map<int, RealType> atypeCutoff; | 
| 318 |  |  | 
| 319 |  | for (set<AtomType*>::iterator at = atypes.begin(); | 
| 321 |  | atid = (*at)->getIdent(); | 
| 322 |  | if (userChoseCutoff_) | 
| 323 |  | atypeCutoff[atid] = userCutoff_; | 
| 324 | < | else | 
| 324 | > | else | 
| 325 |  | atypeCutoff[atid] = interactionMan_->getSuggestedCutoffRadius(*at); | 
| 326 |  | } | 
| 327 | < |  | 
| 327 | > |  | 
| 328 |  | vector<RealType> gTypeCutoffs; | 
| 329 |  | // first we do a single loop over the cutoff groups to find the | 
| 330 |  | // largest cutoff for any atypes present in each group. | 
| 384 |  | vector<RealType> groupCutoff(nGroups_, 0.0); | 
| 385 |  | groupToGtype.resize(nGroups_); | 
| 386 |  | for (int cg1 = 0; cg1 < nGroups_; cg1++) { | 
| 315 | – |  | 
| 387 |  | groupCutoff[cg1] = 0.0; | 
| 388 |  | vector<int> atomList = getAtomsInGroupRow(cg1); | 
| 318 | – |  | 
| 389 |  | for (vector<int>::iterator ia = atomList.begin(); | 
| 390 |  | ia != atomList.end(); ++ia) { | 
| 391 |  | int atom1 = (*ia); | 
| 392 |  | atid = idents[atom1]; | 
| 393 | < | if (atypeCutoff[atid] > groupCutoff[cg1]) { | 
| 393 | > | if (atypeCutoff[atid] > groupCutoff[cg1]) | 
| 394 |  | groupCutoff[cg1] = atypeCutoff[atid]; | 
| 325 | – | } | 
| 395 |  | } | 
| 396 | < |  | 
| 396 | > |  | 
| 397 |  | bool gTypeFound = false; | 
| 398 |  | for (int gt = 0; gt < gTypeCutoffs.size(); gt++) { | 
| 399 |  | if (abs(groupCutoff[cg1] - gTypeCutoffs[gt]) < tol) { | 
| 401 |  | gTypeFound = true; | 
| 402 |  | } | 
| 403 |  | } | 
| 404 | < | if (!gTypeFound) { | 
| 404 | > | if (!gTypeFound) { | 
| 405 |  | gTypeCutoffs.push_back( groupCutoff[cg1] ); | 
| 406 |  | groupToGtype[cg1] = gTypeCutoffs.size() - 1; | 
| 407 |  | } | 
| 410 |  |  | 
| 411 |  | // Now we find the maximum group cutoff value present in the simulation | 
| 412 |  |  | 
| 413 | < | RealType groupMax = *max_element(gTypeCutoffs.begin(), gTypeCutoffs.end()); | 
| 413 | > | RealType groupMax = *max_element(gTypeCutoffs.begin(), | 
| 414 | > | gTypeCutoffs.end()); | 
| 415 |  |  | 
| 416 |  | #ifdef IS_MPI | 
| 417 | < | MPI::COMM_WORLD.Allreduce(&groupMax, &groupMax, 1, MPI::REALTYPE, MPI::MAX); | 
| 417 | > | MPI::COMM_WORLD.Allreduce(MPI::IN_PLACE, &groupMax, 1, MPI::REALTYPE, | 
| 418 | > | MPI::MAX); | 
| 419 |  | #endif | 
| 420 |  |  | 
| 421 |  | RealType tradRcut = groupMax; | 
| 445 |  |  | 
| 446 |  | pair<int,int> key = make_pair(i,j); | 
| 447 |  | gTypeCutoffMap[key].first = thisRcut; | 
| 377 | – |  | 
| 448 |  | if (thisRcut > largestRcut_) largestRcut_ = thisRcut; | 
| 379 | – |  | 
| 449 |  | gTypeCutoffMap[key].second = thisRcut*thisRcut; | 
| 381 | – |  | 
| 450 |  | gTypeCutoffMap[key].third = pow(thisRcut + skinThickness_, 2); | 
| 383 | – |  | 
| 451 |  | // sanity check | 
| 452 |  |  | 
| 453 |  | if (userChoseCutoff_) { | 
| 464 |  | } | 
| 465 |  | } | 
| 466 |  |  | 
| 400 | – |  | 
| 467 |  | groupCutoffs ForceMatrixDecomposition::getGroupCutoffs(int cg1, int cg2) { | 
| 468 |  | int i, j; | 
| 469 |  | #ifdef IS_MPI | 
| 487 |  | void ForceMatrixDecomposition::zeroWorkArrays() { | 
| 488 |  | pairwisePot = 0.0; | 
| 489 |  | embeddingPot = 0.0; | 
| 490 | + | excludedPot = 0.0; | 
| 491 | + | excludedSelfPot = 0.0; | 
| 492 |  |  | 
| 493 |  | #ifdef IS_MPI | 
| 494 |  | if (storageLayout_ & DataStorage::dslForce) { | 
| 507 |  | fill(pot_col.begin(), pot_col.end(), | 
| 508 |  | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 509 |  |  | 
| 510 | + | fill(expot_row.begin(), expot_row.end(), | 
| 511 | + | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 512 | + |  | 
| 513 | + | fill(expot_col.begin(), expot_col.end(), | 
| 514 | + | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 515 | + |  | 
| 516 |  | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 517 | < | fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), 0.0); | 
| 518 | < | fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), 0.0); | 
| 517 | > | fill(atomRowData.particlePot.begin(), atomRowData.particlePot.end(), | 
| 518 | > | 0.0); | 
| 519 | > | fill(atomColData.particlePot.begin(), atomColData.particlePot.end(), | 
| 520 | > | 0.0); | 
| 521 |  | } | 
| 522 |  |  | 
| 523 |  | if (storageLayout_ & DataStorage::dslDensity) { | 
| 526 |  | } | 
| 527 |  |  | 
| 528 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 529 | < | fill(atomRowData.functional.begin(), atomRowData.functional.end(), 0.0); | 
| 530 | < | fill(atomColData.functional.begin(), atomColData.functional.end(), 0.0); | 
| 529 | > | fill(atomRowData.functional.begin(), atomRowData.functional.end(), | 
| 530 | > | 0.0); | 
| 531 | > | fill(atomColData.functional.begin(), atomColData.functional.end(), | 
| 532 | > | 0.0); | 
| 533 |  | } | 
| 534 |  |  | 
| 535 |  | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { | 
| 546 |  | atomColData.skippedCharge.end(), 0.0); | 
| 547 |  | } | 
| 548 |  |  | 
| 549 | < | #else | 
| 550 | < |  | 
| 549 | > | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 550 | > | fill(atomRowData.flucQFrc.begin(), | 
| 551 | > | atomRowData.flucQFrc.end(), 0.0); | 
| 552 | > | fill(atomColData.flucQFrc.begin(), | 
| 553 | > | atomColData.flucQFrc.end(), 0.0); | 
| 554 | > | } | 
| 555 | > |  | 
| 556 | > | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 557 | > | fill(atomRowData.electricField.begin(), | 
| 558 | > | atomRowData.electricField.end(), V3Zero); | 
| 559 | > | fill(atomColData.electricField.begin(), | 
| 560 | > | atomColData.electricField.end(), V3Zero); | 
| 561 | > | } | 
| 562 | > |  | 
| 563 | > | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 564 | > | fill(atomRowData.flucQFrc.begin(), atomRowData.flucQFrc.end(), | 
| 565 | > | 0.0); | 
| 566 | > | fill(atomColData.flucQFrc.begin(), atomColData.flucQFrc.end(), | 
| 567 | > | 0.0); | 
| 568 | > | } | 
| 569 | > |  | 
| 570 | > | #endif | 
| 571 | > | // even in parallel, we need to zero out the local arrays: | 
| 572 | > |  | 
| 573 |  | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 574 |  | fill(snap_->atomData.particlePot.begin(), | 
| 575 |  | snap_->atomData.particlePot.end(), 0.0); | 
| 579 |  | fill(snap_->atomData.density.begin(), | 
| 580 |  | snap_->atomData.density.end(), 0.0); | 
| 581 |  | } | 
| 582 | + |  | 
| 583 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 584 |  | fill(snap_->atomData.functional.begin(), | 
| 585 |  | snap_->atomData.functional.end(), 0.0); | 
| 586 |  | } | 
| 587 | + |  | 
| 588 |  | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { | 
| 589 |  | fill(snap_->atomData.functionalDerivative.begin(), | 
| 590 |  | snap_->atomData.functionalDerivative.end(), 0.0); | 
| 591 |  | } | 
| 592 | + |  | 
| 593 |  | if (storageLayout_ & DataStorage::dslSkippedCharge) { | 
| 594 |  | fill(snap_->atomData.skippedCharge.begin(), | 
| 595 |  | snap_->atomData.skippedCharge.end(), 0.0); | 
| 596 |  | } | 
| 597 | < | #endif | 
| 598 | < |  | 
| 597 | > |  | 
| 598 | > | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 599 | > | fill(snap_->atomData.electricField.begin(), | 
| 600 | > | snap_->atomData.electricField.end(), V3Zero); | 
| 601 | > | } | 
| 602 |  | } | 
| 603 |  |  | 
| 604 |  |  | 
| 608 |  | #ifdef IS_MPI | 
| 609 |  |  | 
| 610 |  | // gather up the atomic positions | 
| 611 | < | AtomCommVectorRow->gather(snap_->atomData.position, | 
| 611 | > | AtomPlanVectorRow->gather(snap_->atomData.position, | 
| 612 |  | atomRowData.position); | 
| 613 | < | AtomCommVectorColumn->gather(snap_->atomData.position, | 
| 613 | > | AtomPlanVectorColumn->gather(snap_->atomData.position, | 
| 614 |  | atomColData.position); | 
| 615 |  |  | 
| 616 |  | // gather up the cutoff group positions | 
| 617 | < | cgCommVectorRow->gather(snap_->cgData.position, | 
| 617 | > |  | 
| 618 | > | cgPlanVectorRow->gather(snap_->cgData.position, | 
| 619 |  | cgRowData.position); | 
| 620 | < | cgCommVectorColumn->gather(snap_->cgData.position, | 
| 620 | > |  | 
| 621 | > | cgPlanVectorColumn->gather(snap_->cgData.position, | 
| 622 |  | cgColData.position); | 
| 623 | + |  | 
| 624 | + |  | 
| 625 | + |  | 
| 626 | + | if (needVelocities_) { | 
| 627 | + | // gather up the atomic velocities | 
| 628 | + | AtomPlanVectorColumn->gather(snap_->atomData.velocity, | 
| 629 | + | atomColData.velocity); | 
| 630 | + |  | 
| 631 | + | cgPlanVectorColumn->gather(snap_->cgData.velocity, | 
| 632 | + | cgColData.velocity); | 
| 633 | + | } | 
| 634 | + |  | 
| 635 |  |  | 
| 636 |  | // if needed, gather the atomic rotation matrices | 
| 637 |  | if (storageLayout_ & DataStorage::dslAmat) { | 
| 638 | < | AtomCommMatrixRow->gather(snap_->atomData.aMat, | 
| 638 | > | AtomPlanMatrixRow->gather(snap_->atomData.aMat, | 
| 639 |  | atomRowData.aMat); | 
| 640 | < | AtomCommMatrixColumn->gather(snap_->atomData.aMat, | 
| 640 | > | AtomPlanMatrixColumn->gather(snap_->atomData.aMat, | 
| 641 |  | atomColData.aMat); | 
| 642 |  | } | 
| 643 |  |  | 
| 644 |  | // if needed, gather the atomic electrostatic frames | 
| 645 |  | if (storageLayout_ & DataStorage::dslElectroFrame) { | 
| 646 | < | AtomCommMatrixRow->gather(snap_->atomData.electroFrame, | 
| 646 | > | AtomPlanMatrixRow->gather(snap_->atomData.electroFrame, | 
| 647 |  | atomRowData.electroFrame); | 
| 648 | < | AtomCommMatrixColumn->gather(snap_->atomData.electroFrame, | 
| 648 | > | AtomPlanMatrixColumn->gather(snap_->atomData.electroFrame, | 
| 649 |  | atomColData.electroFrame); | 
| 650 |  | } | 
| 651 | + |  | 
| 652 | + | // if needed, gather the atomic fluctuating charge values | 
| 653 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 654 | + | AtomPlanRealRow->gather(snap_->atomData.flucQPos, | 
| 655 | + | atomRowData.flucQPos); | 
| 656 | + | AtomPlanRealColumn->gather(snap_->atomData.flucQPos, | 
| 657 | + | atomColData.flucQPos); | 
| 658 | + | } | 
| 659 | + |  | 
| 660 |  | #endif | 
| 661 |  | } | 
| 662 |  |  | 
| 670 |  |  | 
| 671 |  | if (storageLayout_ & DataStorage::dslDensity) { | 
| 672 |  |  | 
| 673 | < | AtomCommRealRow->scatter(atomRowData.density, | 
| 673 | > | AtomPlanRealRow->scatter(atomRowData.density, | 
| 674 |  | snap_->atomData.density); | 
| 675 |  |  | 
| 676 |  | int n = snap_->atomData.density.size(); | 
| 677 |  | vector<RealType> rho_tmp(n, 0.0); | 
| 678 | < | AtomCommRealColumn->scatter(atomColData.density, rho_tmp); | 
| 678 | > | AtomPlanRealColumn->scatter(atomColData.density, rho_tmp); | 
| 679 |  | for (int i = 0; i < n; i++) | 
| 680 |  | snap_->atomData.density[i] += rho_tmp[i]; | 
| 681 |  | } | 
| 682 | + |  | 
| 683 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 684 | + |  | 
| 685 | + | AtomPlanVectorRow->scatter(atomRowData.electricField, | 
| 686 | + | snap_->atomData.electricField); | 
| 687 | + |  | 
| 688 | + | int n = snap_->atomData.electricField.size(); | 
| 689 | + | vector<Vector3d> field_tmp(n, V3Zero); | 
| 690 | + | AtomPlanVectorColumn->scatter(atomColData.electricField, field_tmp); | 
| 691 | + | for (int i = 0; i < n; i++) | 
| 692 | + | snap_->atomData.electricField[i] += field_tmp[i]; | 
| 693 | + | } | 
| 694 |  | #endif | 
| 695 |  | } | 
| 696 |  |  | 
| 703 |  | storageLayout_ = sman_->getStorageLayout(); | 
| 704 |  | #ifdef IS_MPI | 
| 705 |  | if (storageLayout_ & DataStorage::dslFunctional) { | 
| 706 | < | AtomCommRealRow->gather(snap_->atomData.functional, | 
| 706 | > | AtomPlanRealRow->gather(snap_->atomData.functional, | 
| 707 |  | atomRowData.functional); | 
| 708 | < | AtomCommRealColumn->gather(snap_->atomData.functional, | 
| 708 | > | AtomPlanRealColumn->gather(snap_->atomData.functional, | 
| 709 |  | atomColData.functional); | 
| 710 |  | } | 
| 711 |  |  | 
| 712 |  | if (storageLayout_ & DataStorage::dslFunctionalDerivative) { | 
| 713 | < | AtomCommRealRow->gather(snap_->atomData.functionalDerivative, | 
| 713 | > | AtomPlanRealRow->gather(snap_->atomData.functionalDerivative, | 
| 714 |  | atomRowData.functionalDerivative); | 
| 715 | < | AtomCommRealColumn->gather(snap_->atomData.functionalDerivative, | 
| 715 | > | AtomPlanRealColumn->gather(snap_->atomData.functionalDerivative, | 
| 716 |  | atomColData.functionalDerivative); | 
| 717 |  | } | 
| 718 |  | #endif | 
| 726 |  | int n = snap_->atomData.force.size(); | 
| 727 |  | vector<Vector3d> frc_tmp(n, V3Zero); | 
| 728 |  |  | 
| 729 | < | AtomCommVectorRow->scatter(atomRowData.force, frc_tmp); | 
| 729 | > | AtomPlanVectorRow->scatter(atomRowData.force, frc_tmp); | 
| 730 |  | for (int i = 0; i < n; i++) { | 
| 731 |  | snap_->atomData.force[i] += frc_tmp[i]; | 
| 732 |  | frc_tmp[i] = 0.0; | 
| 733 |  | } | 
| 734 |  |  | 
| 735 | < | AtomCommVectorColumn->scatter(atomColData.force, frc_tmp); | 
| 736 | < | for (int i = 0; i < n; i++) | 
| 735 | > | AtomPlanVectorColumn->scatter(atomColData.force, frc_tmp); | 
| 736 | > | for (int i = 0; i < n; i++) { | 
| 737 |  | snap_->atomData.force[i] += frc_tmp[i]; | 
| 738 | < |  | 
| 739 | < |  | 
| 738 | > | } | 
| 739 | > |  | 
| 740 |  | if (storageLayout_ & DataStorage::dslTorque) { | 
| 741 |  |  | 
| 742 |  | int nt = snap_->atomData.torque.size(); | 
| 743 |  | vector<Vector3d> trq_tmp(nt, V3Zero); | 
| 744 |  |  | 
| 745 | < | AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp); | 
| 745 | > | AtomPlanVectorRow->scatter(atomRowData.torque, trq_tmp); | 
| 746 |  | for (int i = 0; i < nt; i++) { | 
| 747 |  | snap_->atomData.torque[i] += trq_tmp[i]; | 
| 748 |  | trq_tmp[i] = 0.0; | 
| 749 |  | } | 
| 750 |  |  | 
| 751 | < | AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp); | 
| 751 | > | AtomPlanVectorColumn->scatter(atomColData.torque, trq_tmp); | 
| 752 |  | for (int i = 0; i < nt; i++) | 
| 753 |  | snap_->atomData.torque[i] += trq_tmp[i]; | 
| 754 |  | } | 
| 758 |  | int ns = snap_->atomData.skippedCharge.size(); | 
| 759 |  | vector<RealType> skch_tmp(ns, 0.0); | 
| 760 |  |  | 
| 761 | < | AtomCommRealRow->scatter(atomRowData.skippedCharge, skch_tmp); | 
| 761 | > | AtomPlanRealRow->scatter(atomRowData.skippedCharge, skch_tmp); | 
| 762 |  | for (int i = 0; i < ns; i++) { | 
| 763 | < | snap_->atomData.skippedCharge[i] = skch_tmp[i]; | 
| 763 | > | snap_->atomData.skippedCharge[i] += skch_tmp[i]; | 
| 764 |  | skch_tmp[i] = 0.0; | 
| 765 |  | } | 
| 766 |  |  | 
| 767 | < | AtomCommRealColumn->scatter(atomColData.skippedCharge, skch_tmp); | 
| 768 | < | for (int i = 0; i < ns; i++) | 
| 767 | > | AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp); | 
| 768 | > | for (int i = 0; i < ns; i++) | 
| 769 |  | snap_->atomData.skippedCharge[i] += skch_tmp[i]; | 
| 770 | + |  | 
| 771 |  | } | 
| 772 |  |  | 
| 773 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 774 | + |  | 
| 775 | + | int nq = snap_->atomData.flucQFrc.size(); | 
| 776 | + | vector<RealType> fqfrc_tmp(nq, 0.0); | 
| 777 | + |  | 
| 778 | + | AtomPlanRealRow->scatter(atomRowData.flucQFrc, fqfrc_tmp); | 
| 779 | + | for (int i = 0; i < nq; i++) { | 
| 780 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 781 | + | fqfrc_tmp[i] = 0.0; | 
| 782 | + | } | 
| 783 | + |  | 
| 784 | + | AtomPlanRealColumn->scatter(atomColData.flucQFrc, fqfrc_tmp); | 
| 785 | + | for (int i = 0; i < nq; i++) | 
| 786 | + | snap_->atomData.flucQFrc[i] += fqfrc_tmp[i]; | 
| 787 | + |  | 
| 788 | + | } | 
| 789 | + |  | 
| 790 |  | nLocal_ = snap_->getNumberOfAtoms(); | 
| 791 |  |  | 
| 792 |  | vector<potVec> pot_temp(nLocal_, | 
| 793 |  | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 794 | + | vector<potVec> expot_temp(nLocal_, | 
| 795 | + | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 796 |  |  | 
| 797 |  | // scatter/gather pot_row into the members of my column | 
| 798 |  |  | 
| 799 | < | AtomCommPotRow->scatter(pot_row, pot_temp); | 
| 799 | > | AtomPlanPotRow->scatter(pot_row, pot_temp); | 
| 800 | > | AtomPlanPotRow->scatter(expot_row, expot_temp); | 
| 801 |  |  | 
| 802 | < | for (int ii = 0;  ii < pot_temp.size(); ii++ ) | 
| 802 | > | for (int ii = 0;  ii < pot_temp.size(); ii++ ) | 
| 803 |  | pairwisePot += pot_temp[ii]; | 
| 804 | < |  | 
| 804 | > |  | 
| 805 | > | for (int ii = 0;  ii < expot_temp.size(); ii++ ) | 
| 806 | > | excludedPot += expot_temp[ii]; | 
| 807 | > |  | 
| 808 | > | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 809 | > | // This is the pairwise contribution to the particle pot.  The | 
| 810 | > | // embedding contribution is added in each of the low level | 
| 811 | > | // non-bonded routines.  On a single processor, this is done in | 
| 812 | > | // unpackInteractionData, not in collectData. | 
| 813 | > | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 814 | > | for (int i = 0; i < nLocal_; i++) { | 
| 815 | > | // The factor of two is because the total potential terms are divided | 
| 816 | > | // by 2 in parallel due to the row/column scatter. | 
| 817 | > | snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); | 
| 818 | > | } | 
| 819 | > | } | 
| 820 | > | } | 
| 821 | > |  | 
| 822 |  | fill(pot_temp.begin(), pot_temp.end(), | 
| 823 |  | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 824 | + | fill(expot_temp.begin(), expot_temp.end(), | 
| 825 | + | Vector<RealType, N_INTERACTION_FAMILIES> (0.0)); | 
| 826 |  |  | 
| 827 | < | AtomCommPotColumn->scatter(pot_col, pot_temp); | 
| 827 | > | AtomPlanPotColumn->scatter(pot_col, pot_temp); | 
| 828 | > | AtomPlanPotColumn->scatter(expot_col, expot_temp); | 
| 829 |  |  | 
| 830 |  | for (int ii = 0;  ii < pot_temp.size(); ii++ ) | 
| 831 |  | pairwisePot += pot_temp[ii]; | 
| 832 | + |  | 
| 833 | + | for (int ii = 0;  ii < expot_temp.size(); ii++ ) | 
| 834 | + | excludedPot += expot_temp[ii]; | 
| 835 | + |  | 
| 836 | + | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 837 | + | // This is the pairwise contribution to the particle pot.  The | 
| 838 | + | // embedding contribution is added in each of the low level | 
| 839 | > | // non-bonded routines.  On a single processor, this is done in | 
| 840 | + | // unpackInteractionData, not in collectData. | 
| 841 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 842 | + | for (int i = 0; i < nLocal_; i++) { | 
| 843 | > | // The factor of two is because the total potential terms are divided | 
| 844 | > | // by 2 in parallel due to the row/column scatter. | 
| 845 | + | snap_->atomData.particlePot[i] += 2.0 * pot_temp[i](ii); | 
| 846 | + | } | 
| 847 | + | } | 
| 848 | + | } | 
| 849 | + |  | 
| 850 | + | if (storageLayout_ & DataStorage::dslParticlePot) { | 
| 851 | + | int npp = snap_->atomData.particlePot.size(); | 
| 852 | + | vector<RealType> ppot_temp(npp, 0.0); | 
| 853 | + |  | 
| 854 | + | // This is the direct or embedding contribution to the particle | 
| 855 | + | // pot. | 
| 856 | + |  | 
| 857 | + | AtomPlanRealRow->scatter(atomRowData.particlePot, ppot_temp); | 
| 858 | + | for (int i = 0; i < npp; i++) { | 
| 859 | + | snap_->atomData.particlePot[i] += ppot_temp[i]; | 
| 860 | + | } | 
| 861 | + |  | 
| 862 | + | fill(ppot_temp.begin(), ppot_temp.end(), 0.0); | 
| 863 | + |  | 
| 864 | + | AtomPlanRealColumn->scatter(atomColData.particlePot, ppot_temp); | 
| 865 | + | for (int i = 0; i < npp; i++) { | 
| 866 | + | snap_->atomData.particlePot[i] += ppot_temp[i]; | 
| 867 | + | } | 
| 868 | + | } | 
| 869 | + |  | 
| 870 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 871 | + | RealType ploc1 = pairwisePot[ii]; | 
| 872 | + | RealType ploc2 = 0.0; | 
| 873 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 874 | + | pairwisePot[ii] = ploc2; | 
| 875 | + | } | 
| 876 | + |  | 
| 877 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 878 | + | RealType ploc1 = excludedPot[ii]; | 
| 879 | + | RealType ploc2 = 0.0; | 
| 880 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 881 | + | excludedPot[ii] = ploc2; | 
| 882 | + | } | 
| 883 | + |  | 
| 884 | + | // Sum the conductive heat flux in place across the column communicator. | 
| 885 | + | MPI::Intracomm col = colComm.getComm(); | 
| 886 | + |  | 
| 887 | + | col.Allreduce(MPI::IN_PLACE, | 
| 888 | + | &snap_->frameData.conductiveHeatFlux[0], 3, | 
| 889 | + | MPI::REALTYPE, MPI::SUM); | 
| 890 | + |  | 
| 891 | + |  | 
| 892 |  | #endif | 
| 893 |  |  | 
| 894 |  | } | 
| 895 |  |  | 
| 896 | + | /** | 
| 897 | + | * Collects information obtained during the post-pair (and embedding | 
| 898 | + | * functional) loops onto local data structures. | 
| 899 | + | */ | 
| 900 | + | void ForceMatrixDecomposition::collectSelfData() { | 
| 901 | + | snap_ = sman_->getCurrentSnapshot(); | 
| 902 | + | storageLayout_ = sman_->getStorageLayout(); | 
| 903 | + |  | 
| 904 | + | #ifdef IS_MPI | 
| 905 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 906 | + | RealType ploc1 = embeddingPot[ii]; | 
| 907 | + | RealType ploc2 = 0.0; | 
| 908 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 909 | + | embeddingPot[ii] = ploc2; | 
| 910 | + | } | 
| 911 | + | for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) { | 
| 912 | + | RealType ploc1 = excludedSelfPot[ii]; | 
| 913 | + | RealType ploc2 = 0.0; | 
| 914 | + | MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM); | 
| 915 | + | excludedSelfPot[ii] = ploc2; | 
| 916 | + | } | 
| 917 | + | #endif | 
| 918 | + |  | 
| 919 | + | } | 
| 920 | + |  | 
| 921 | + |  | 
| 922 | + |  | 
| 923 |  | int ForceMatrixDecomposition::getNAtomsInRow() { | 
| 924 |  | #ifdef IS_MPI | 
| 925 |  | return nAtomsInRow_; | 
| 960 |  | return d; | 
| 961 |  | } | 
| 962 |  |  | 
| 963 | + | Vector3d ForceMatrixDecomposition::getGroupVelocityColumn(int cg2){ | 
| 964 | + | #ifdef IS_MPI | 
| 965 | + | return cgColData.velocity[cg2]; | 
| 966 | + | #else | 
| 967 | + | return snap_->cgData.velocity[cg2]; | 
| 968 | + | #endif | 
| 969 | + | } | 
| 970 |  |  | 
| 971 | + | Vector3d ForceMatrixDecomposition::getAtomVelocityColumn(int atom2){ | 
| 972 | + | #ifdef IS_MPI | 
| 973 | + | return atomColData.velocity[atom2]; | 
| 974 | + | #else | 
| 975 | + | return snap_->atomData.velocity[atom2]; | 
| 976 | + | #endif | 
| 977 | + | } | 
| 978 | + |  | 
| 979 | + |  | 
| 980 |  | Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){ | 
| 981 |  |  | 
| 982 |  | Vector3d d; | 
| 1042 |  | * We need to exclude some overcounted interactions that result from | 
| 1043 |  | * the parallel decomposition. | 
| 1044 |  | */ | 
| 1045 | < | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) { | 
| 1046 | < | int unique_id_1, unique_id_2; | 
| 1047 | < |  | 
| 1045 | > | bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2, int cg1, int cg2) { | 
| 1046 | > | int unique_id_1, unique_id_2, group1, group2; | 
| 1047 | > |  | 
| 1048 |  | #ifdef IS_MPI | 
| 1049 |  | // in MPI, we have to look up the unique IDs for each atom | 
| 1050 |  | unique_id_1 = AtomRowToGlobal[atom1]; | 
| 1051 |  | unique_id_2 = AtomColToGlobal[atom2]; | 
| 1052 | + | group1 = cgRowToGlobal[cg1]; | 
| 1053 | + | group2 = cgColToGlobal[cg2]; | 
| 1054 | + | #else | 
| 1055 | + | unique_id_1 = AtomLocalToGlobal[atom1]; | 
| 1056 | + | unique_id_2 = AtomLocalToGlobal[atom2]; | 
| 1057 | + | group1 = cgLocalToGlobal[cg1]; | 
| 1058 | + | group2 = cgLocalToGlobal[cg2]; | 
| 1059 | + | #endif | 
| 1060 |  |  | 
| 768 | – | // this situation should only arise in MPI simulations | 
| 1061 |  | if (unique_id_1 == unique_id_2) return true; | 
| 1062 | < |  | 
| 1062 | > |  | 
| 1063 | > | #ifdef IS_MPI | 
| 1064 |  | // this prevents us from doing the pair on multiple processors | 
| 1065 |  | if (unique_id_1 < unique_id_2) { | 
| 1066 |  | if ((unique_id_1 + unique_id_2) % 2 == 0) return true; | 
| 1067 |  | } else { | 
| 1068 | < | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 1068 | > | if ((unique_id_1 + unique_id_2) % 2 == 1) return true; | 
| 1069 |  | } | 
| 1070 | + | #endif | 
| 1071 | + |  | 
| 1072 | + | #ifndef IS_MPI | 
| 1073 | + | if (group1 == group2) { | 
| 1074 | + | if (unique_id_1 < unique_id_2) return true; | 
| 1075 | + | } | 
| 1076 |  | #endif | 
| 1077 | + |  | 
| 1078 |  | return false; | 
| 1079 |  | } | 
| 1080 |  |  | 
| 1088 |  | * field) must still be handled for these pairs. | 
| 1089 |  | */ | 
| 1090 |  | bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) { | 
| 1091 | < | int unique_id_2; | 
| 1091 | > |  | 
| 1092 | > | // excludesForAtom was constructed to use row/column indices in the MPI | 
| 1093 | > | // version, and to use local IDs in the non-MPI version: | 
| 1094 |  |  | 
| 793 | – | #ifdef IS_MPI | 
| 794 | – | // in MPI, we have to look up the unique IDs for the row atom. | 
| 795 | – | unique_id_2 = AtomColToGlobal[atom2]; | 
| 796 | – | #else | 
| 797 | – | // in the normal loop, the atom numbers are unique | 
| 798 | – | unique_id_2 = atom2; | 
| 799 | – | #endif | 
| 800 | – |  | 
| 1095 |  | for (vector<int>::iterator i = excludesForAtom[atom1].begin(); | 
| 1096 |  | i != excludesForAtom[atom1].end(); ++i) { | 
| 1097 | < | if ( (*i) == unique_id_2 ) return true; | 
| 1097 | > | if ( (*i) == atom2 ) return true; | 
| 1098 |  | } | 
| 1099 |  |  | 
| 1100 |  | return false; | 
| 1124 |  | idat.excluded = excludeAtomPair(atom1, atom2); | 
| 1125 |  |  | 
| 1126 |  | #ifdef IS_MPI | 
| 1127 | + | idat.atypes = make_pair( atypesRow[atom1], atypesCol[atom2]); | 
| 1128 | + | //idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]), | 
| 1129 | + | //                         ff_->getAtomType(identsCol[atom2]) ); | 
| 1130 |  |  | 
| 834 | – | idat.atypes = make_pair( ff_->getAtomType(identsRow[atom1]), | 
| 835 | – | ff_->getAtomType(identsCol[atom2]) ); | 
| 836 | – |  | 
| 1131 |  | if (storageLayout_ & DataStorage::dslAmat) { | 
| 1132 |  | idat.A1 = &(atomRowData.aMat[atom1]); | 
| 1133 |  | idat.A2 = &(atomColData.aMat[atom2]); | 
| 1168 |  | idat.skippedCharge2 = &(atomColData.skippedCharge[atom2]); | 
| 1169 |  | } | 
| 1170 |  |  | 
| 1171 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 1172 | + | idat.flucQ1 = &(atomRowData.flucQPos[atom1]); | 
| 1173 | + | idat.flucQ2 = &(atomColData.flucQPos[atom2]); | 
| 1174 | + | } | 
| 1175 | + |  | 
| 1176 |  | #else | 
| 1177 | + |  | 
| 1178 | + | idat.atypes = make_pair( atypesLocal[atom1], atypesLocal[atom2]); | 
| 1179 |  |  | 
| 879 | – | idat.atypes = make_pair( ff_->getAtomType(idents[atom1]), | 
| 880 | – | ff_->getAtomType(idents[atom2]) ); | 
| 881 | – |  | 
| 1180 |  | if (storageLayout_ & DataStorage::dslAmat) { | 
| 1181 |  | idat.A1 = &(snap_->atomData.aMat[atom1]); | 
| 1182 |  | idat.A2 = &(snap_->atomData.aMat[atom2]); | 
| 1216 |  | idat.skippedCharge1 = &(snap_->atomData.skippedCharge[atom1]); | 
| 1217 |  | idat.skippedCharge2 = &(snap_->atomData.skippedCharge[atom2]); | 
| 1218 |  | } | 
| 1219 | + |  | 
| 1220 | + | if (storageLayout_ & DataStorage::dslFlucQPosition) { | 
| 1221 | + | idat.flucQ1 = &(snap_->atomData.flucQPos[atom1]); | 
| 1222 | + | idat.flucQ2 = &(snap_->atomData.flucQPos[atom2]); | 
| 1223 | + | } | 
| 1224 | + |  | 
| 1225 |  | #endif | 
| 1226 |  | } | 
| 1227 |  |  | 
| 1228 |  |  | 
| 1229 |  | void ForceMatrixDecomposition::unpackInteractionData(InteractionData &idat, int atom1, int atom2) { | 
| 1230 |  | #ifdef IS_MPI | 
| 1231 | < | pot_row[atom1] += 0.5 *  *(idat.pot); | 
| 1232 | < | pot_col[atom2] += 0.5 *  *(idat.pot); | 
| 1231 | > | pot_row[atom1] += RealType(0.5) *  *(idat.pot); | 
| 1232 | > | pot_col[atom2] += RealType(0.5) *  *(idat.pot); | 
| 1233 | > | expot_row[atom1] += RealType(0.5) *  *(idat.excludedPot); | 
| 1234 | > | expot_col[atom2] += RealType(0.5) *  *(idat.excludedPot); | 
| 1235 |  |  | 
| 1236 |  | atomRowData.force[atom1] += *(idat.f1); | 
| 1237 |  | atomColData.force[atom2] -= *(idat.f1); | 
| 1238 | + |  | 
| 1239 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 1240 | + | atomRowData.flucQFrc[atom1] -= *(idat.dVdFQ1); | 
| 1241 | + | atomColData.flucQFrc[atom2] -= *(idat.dVdFQ2); | 
| 1242 | + | } | 
| 1243 | + |  | 
| 1244 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 1245 | + | atomRowData.electricField[atom1] += *(idat.eField1); | 
| 1246 | + | atomColData.electricField[atom2] += *(idat.eField2); | 
| 1247 | + | } | 
| 1248 | + |  | 
| 1249 |  | #else | 
| 1250 |  | pairwisePot += *(idat.pot); | 
| 1251 | + | excludedPot += *(idat.excludedPot); | 
| 1252 |  |  | 
| 1253 |  | snap_->atomData.force[atom1] += *(idat.f1); | 
| 1254 |  | snap_->atomData.force[atom2] -= *(idat.f1); | 
| 1255 | + |  | 
| 1256 | + | if (idat.doParticlePot) { | 
| 1257 | + | // This is the pairwise contribution to the particle pot.  The | 
| 1258 | + | // embedding contribution is added in each of the low level | 
| 1259 | + | // non-bonded routines.  In parallel, this calculation is done | 
| 1260 | + | // in collectData, not in unpackInteractionData. | 
| 1261 | + | snap_->atomData.particlePot[atom1] += *(idat.vpair) * *(idat.sw); | 
| 1262 | + | snap_->atomData.particlePot[atom2] += *(idat.vpair) * *(idat.sw); | 
| 1263 | + | } | 
| 1264 | + |  | 
| 1265 | + | if (storageLayout_ & DataStorage::dslFlucQForce) { | 
| 1266 | + | snap_->atomData.flucQFrc[atom1] -= *(idat.dVdFQ1); | 
| 1267 | + | snap_->atomData.flucQFrc[atom2] -= *(idat.dVdFQ2); | 
| 1268 | + | } | 
| 1269 | + |  | 
| 1270 | + | if (storageLayout_ & DataStorage::dslElectricField) { | 
| 1271 | + | snap_->atomData.electricField[atom1] += *(idat.eField1); | 
| 1272 | + | snap_->atomData.electricField[atom2] += *(idat.eField2); | 
| 1273 | + | } | 
| 1274 | + |  | 
| 1275 |  | #endif | 
| 1276 |  |  | 
| 1277 |  | } | 
| 1353 |  | // add this cutoff group to the list of groups in this cell; | 
| 1354 |  | cellListRow_[cellIndex].push_back(i); | 
| 1355 |  | } | 
| 1018 | – |  | 
| 1356 |  | for (int i = 0; i < nGroupsInCol_; i++) { | 
| 1357 |  | rs = cgColData.position[i]; | 
| 1358 |  |  | 
| 1377 |  | // add this cutoff group to the list of groups in this cell; | 
| 1378 |  | cellListCol_[cellIndex].push_back(i); | 
| 1379 |  | } | 
| 1380 | + |  | 
| 1381 |  | #else | 
| 1382 |  | for (int i = 0; i < nGroups_; i++) { | 
| 1383 |  | rs = snap_->cgData.position[i]; | 
| 1398 |  | whichCell.z() = nCells_.z() * scaled.z(); | 
| 1399 |  |  | 
| 1400 |  | // find single index of this cell: | 
| 1401 | < | cellIndex = Vlinear(whichCell, nCells_); | 
| 1401 | > | cellIndex = Vlinear(whichCell, nCells_); | 
| 1402 |  |  | 
| 1403 |  | // add this cutoff group to the list of groups in this cell; | 
| 1404 |  | cellList_[cellIndex].push_back(i); | 
| 1405 |  | } | 
| 1406 | + |  | 
| 1407 |  | #endif | 
| 1408 |  |  | 
| 1409 |  | for (int m1z = 0; m1z < nCells_.z(); m1z++) { | 
| 1416 |  | os != cellOffsets_.end(); ++os) { | 
| 1417 |  |  | 
| 1418 |  | Vector3i m2v = m1v + (*os); | 
| 1419 | < |  | 
| 1419 | > |  | 
| 1420 | > |  | 
| 1421 |  | if (m2v.x() >= nCells_.x()) { | 
| 1422 |  | m2v.x() = 0; | 
| 1423 |  | } else if (m2v.x() < 0) { | 
| 1435 |  | } else if (m2v.z() < 0) { | 
| 1436 |  | m2v.z() = nCells_.z() - 1; | 
| 1437 |  | } | 
| 1438 | < |  | 
| 1438 | > |  | 
| 1439 |  | int m2 = Vlinear (m2v, nCells_); | 
| 1440 |  |  | 
| 1441 |  | #ifdef IS_MPI | 
| 1444 |  | for (vector<int>::iterator j2 = cellListCol_[m2].begin(); | 
| 1445 |  | j2 != cellListCol_[m2].end(); ++j2) { | 
| 1446 |  |  | 
| 1447 | < | // Always do this if we're in different cells or if | 
| 1448 | < | // we're in the same cell and the global index of the | 
| 1449 | < | // j2 cutoff group is less than the j1 cutoff group | 
| 1450 | < |  | 
| 1451 | < | if (m2 != m1 || cgColToGlobal[(*j2)] < cgRowToGlobal[(*j1)]) { | 
| 1452 | < | dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)]; | 
| 1453 | < | snap_->wrapVector(dr); | 
| 1454 | < | cuts = getGroupCutoffs( (*j1), (*j2) ); | 
| 1455 | < | if (dr.lengthSquare() < cuts.third) { | 
| 1116 | < | neighborList.push_back(make_pair((*j1), (*j2))); | 
| 1117 | < | } | 
| 1118 | < | } | 
| 1447 | > | // In parallel, we need to visit *all* pairs of row | 
| 1448 | > | // & column indices and will divide labor in the | 
| 1449 | > | // force evaluation later. | 
| 1450 | > | dr = cgColData.position[(*j2)] - cgRowData.position[(*j1)]; | 
| 1451 | > | snap_->wrapVector(dr); | 
| 1452 | > | cuts = getGroupCutoffs( (*j1), (*j2) ); | 
| 1453 | > | if (dr.lengthSquare() < cuts.third) { | 
| 1454 | > | neighborList.push_back(make_pair((*j1), (*j2))); | 
| 1455 | > | } | 
| 1456 |  | } | 
| 1457 |  | } | 
| 1458 |  | #else | 
| 1122 | – |  | 
| 1459 |  | for (vector<int>::iterator j1 = cellList_[m1].begin(); | 
| 1460 |  | j1 != cellList_[m1].end(); ++j1) { | 
| 1461 |  | for (vector<int>::iterator j2 = cellList_[m2].begin(); | 
| 1462 |  | j2 != cellList_[m2].end(); ++j2) { | 
| 1463 | < |  | 
| 1463 | > |  | 
| 1464 |  | // Always do this if we're in different cells or if | 
| 1465 | < | // we're in the same cell and the global index of the | 
| 1466 | < | // j2 cutoff group is less than the j1 cutoff group | 
| 1467 | < |  | 
| 1468 | < | if (m2 != m1 || (*j2) < (*j1)) { | 
| 1465 | > | // we're in the same cell and the global index of | 
| 1466 | > | // the j2 cutoff group is greater than or equal to | 
| 1467 | > | // that of the j1 cutoff group.  Note that Rapaport's code | 
| 1468 | > | // has a "less than" conditional here, but that | 
| 1469 | > | // deals with atom-by-atom computation.  OpenMD | 
| 1470 | > | // allows atoms within a single cutoff group to | 
| 1471 | > | // interact with each other. | 
| 1472 | > |  | 
| 1473 | > |  | 
| 1474 | > |  | 
| 1475 | > | if (m2 != m1 || (*j2) >= (*j1) ) { | 
| 1476 | > |  | 
| 1477 |  | dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)]; | 
| 1478 |  | snap_->wrapVector(dr); | 
| 1479 |  | cuts = getGroupCutoffs( (*j1), (*j2) ); | 
| 1492 |  | // branch to do all cutoff group pairs | 
| 1493 |  | #ifdef IS_MPI | 
| 1494 |  | for (int j1 = 0; j1 < nGroupsInRow_; j1++) { | 
| 1495 | < | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1495 | > | for (int j2 = 0; j2 < nGroupsInCol_; j2++) { | 
| 1496 |  | dr = cgColData.position[j2] - cgRowData.position[j1]; | 
| 1497 |  | snap_->wrapVector(dr); | 
| 1498 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1500 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1501 |  | } | 
| 1502 |  | } | 
| 1503 | < | } | 
| 1503 | > | } | 
| 1504 |  | #else | 
| 1505 | < | for (int j1 = 0; j1 < nGroups_ - 1; j1++) { | 
| 1506 | < | for (int j2 = j1 + 1; j2 < nGroups_; j2++) { | 
| 1505 | > | // include all groups here. | 
| 1506 | > | for (int j1 = 0; j1 < nGroups_; j1++) { | 
| 1507 | > | // include self group interactions j2 == j1 | 
| 1508 | > | for (int j2 = j1; j2 < nGroups_; j2++) { | 
| 1509 |  | dr = snap_->cgData.position[j2] - snap_->cgData.position[j1]; | 
| 1510 |  | snap_->wrapVector(dr); | 
| 1511 |  | cuts = getGroupCutoffs( j1, j2 ); | 
| 1512 |  | if (dr.lengthSquare() < cuts.third) { | 
| 1513 |  | neighborList.push_back(make_pair(j1, j2)); | 
| 1514 |  | } | 
| 1515 | < | } | 
| 1516 | < | } | 
| 1515 | > | } | 
| 1516 | > | } | 
| 1517 |  | #endif | 
| 1518 |  | } | 
| 1519 |  |  |