```diff
@@ -229 +229 @@ namespace OpenMD {
               topoDist[i].push_back(3);
             }
           }
-        }
-      }
-    }
-
-#endif
-
-    // allocate memory for the parallel objects
-    atypesLocal.resize(nLocal_);
-
-    for (int i = 0; i < nLocal_; i++)
-      atypesLocal[i] = ff_->getAtomType(idents[i]);
-
-    groupList_.clear();
-    groupList_.resize(nGroups_);
-    for (int i = 0; i < nGroups_; i++) {
-      int gid = cgLocalToGlobal[i];
-      for (int j = 0; j < nLocal_; j++) {
-        int aid = AtomLocalToGlobal[j];
-        if (globalGroupMembership[aid] == gid) {
-          groupList_[i].push_back(j);
         }
       }
     }
 
+#else
     excludesForAtom.clear();
     excludesForAtom.resize(nLocal_);
     toposForAtom.clear();
```
```diff
@@ -285 +266 @@ namespace OpenMD {
         }
       }
     }
-
+#endif
+
+    // allocate memory for the parallel objects
+    atypesLocal.resize(nLocal_);
+
+    for (int i = 0; i < nLocal_; i++)
+      atypesLocal[i] = ff_->getAtomType(idents[i]);
+
+    groupList_.clear();
+    groupList_.resize(nGroups_);
+    for (int i = 0; i < nGroups_; i++) {
+      int gid = cgLocalToGlobal[i];
+      for (int j = 0; j < nLocal_; j++) {
+        int aid = AtomLocalToGlobal[j];
+        if (globalGroupMembership[aid] == gid) {
+          groupList_[i].push_back(j);
+        }
+      }
+    }
+
+
     createGtypeCutoffMap();
 
   }
```
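Taken together, these two hunks are a move: the `#endif`, the `atypesLocal` setup, and the `groupList_` construction migrate from before the serial `excludesForAtom`/`toposForAtom` build to after it, and the new `#else` makes that serial build exclusive to non-MPI compilations. A skeleton of the resulting layout (a sketch inferred from the hunks; the enclosing function's declaration is outside the displayed range):

```cpp
#ifdef IS_MPI
    // build excludesForAtom / toposForAtom from row & column indices
#else
    // build excludesForAtom / toposForAtom from local atom indices
#endif

    // shared by both builds: per-atom types and group membership
    atypesLocal.resize(nLocal_);
    for (int i = 0; i < nLocal_; i++)
      atypesLocal[i] = ff_->getAtomType(idents[i]);

    groupList_.clear();
    groupList_.resize(nGroups_);
    // ... fill groupList_[i] with the local atoms of cutoff group i ...

    createGtypeCutoffMap();
```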
```diff
@@ -683 +684 @@ namespace OpenMD {
       }
 
       AtomPlanRealColumn->scatter(atomColData.skippedCharge, skch_tmp);
-      for (int i = 0; i < ns; i++) 
+      for (int i = 0; i < ns; i++)
         snap_->atomData.skippedCharge[i] += skch_tmp[i];
+
     }
 
     nLocal_ = snap_->getNumberOfAtoms();
```
```diff
@@ -714 +716 @@ namespace OpenMD {
       pairwisePot[ii] = ploc2;
     }
 
+    for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++) {
+      RealType ploc1 = embeddingPot[ii];
+      RealType ploc2 = 0.0;
+      MPI::COMM_WORLD.Allreduce(&ploc1, &ploc2, 1, MPI::REALTYPE, MPI::SUM);
+      embeddingPot[ii] = ploc2;
+    }
+
 #endif
 
   }
```
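The added loop gives `embeddingPot` the same per-family `Allreduce` treatment that `pairwisePot` receives just above it, so the embedding contributions are summed across ranks as well. As an aside, the `MPI::` C++ bindings used here were deprecated in MPI-2.2 and removed in MPI-3.0; with the C bindings the whole vector could be reduced in one in-place call. A sketch, assuming the surrounding OpenMD context (`<mpi.h>`, `RealType`, `N_INTERACTION_FAMILIES`) and that `RealType` is `double`:

```cpp
// Stage the values in a plain array so one in-place reduction covers
// every interaction family (sketch; assumes RealType == double).
RealType ploc[N_INTERACTION_FAMILIES];
for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
  ploc[ii] = embeddingPot[ii];

MPI_Allreduce(MPI_IN_PLACE, ploc, N_INTERACTION_FAMILIES,
              MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

for (int ii = 0; ii < N_INTERACTION_FAMILIES; ii++)
  embeddingPot[ii] = ploc[ii];
```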
```diff
@@ -826 +835 @@ namespace OpenMD {
    */
   bool ForceMatrixDecomposition::skipAtomPair(int atom1, int atom2) {
     int unique_id_1, unique_id_2;
-    
+
 #ifdef IS_MPI
     // in MPI, we have to look up the unique IDs for each atom
     unique_id_1 = AtomRowToGlobal[atom1];
     unique_id_2 = AtomColToGlobal[atom2];
-
-    // this situation should only arise in MPI simulations
+#else
+    unique_id_1 = AtomLocalToGlobal[atom1];
+    unique_id_2 = AtomLocalToGlobal[atom2];
+#endif
+
     if (unique_id_1 == unique_id_2) return true;
-    
+
+#ifdef IS_MPI
     // this prevents us from doing the pair on multiple processors
     if (unique_id_1 < unique_id_2) {
       if ((unique_id_1 + unique_id_2) % 2 == 0) return true;
     } else {
-      if ((unique_id_1 + unique_id_2) % 2 == 1) return true; 
+      if ((unique_id_1 + unique_id_2) % 2 == 1) return true;
     }
 #endif
+
     return false;
   }
 
```
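The parity test deserves a second look: in the row/column decomposition a pair of atoms can appear on two different processors, once as (row i, column j) and once as (row j, column i). Skipping when the smaller ID comes first and the sum is even, or when the larger ID comes first and the sum is odd, accepts exactly one of the two orderings and splits the accepted pairs roughly evenly between the two hosting processors. A standalone check of that property (hypothetical test code, not part of the patch):

```cpp
#include <cassert>

// Same predicate as ForceMatrixDecomposition::skipAtomPair, minus the
// row/column index translation (sketch for illustration only).
static bool skip(int id1, int id2) {
  if (id1 == id2) return true;                  // self pair: always skip
  if (id1 < id2)  return (id1 + id2) % 2 == 0;  // skip on even sums
  return (id1 + id2) % 2 == 1;                  // skip on odd sums
}

int main() {
  // For every distinct pair, exactly one of the two orderings is kept,
  // so each pair is computed once regardless of which processor sees it.
  for (int a = 0; a < 100; a++)
    for (int b = 0; b < 100; b++)
      if (a != b) assert(skip(a, b) != skip(b, a));
  return 0;
}
```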
```diff
@@ -855 +869 @@ namespace OpenMD {
    * field) must still be handled for these pairs.
    */
   bool ForceMatrixDecomposition::excludeAtomPair(int atom1, int atom2) {
-    int unique_id_2;
-#ifdef IS_MPI
-    // in MPI, we have to look up the unique IDs for the row atom.
-    unique_id_2 = AtomColToGlobal[atom2];
-#else
-    // in the normal loop, the atom numbers are unique
-    unique_id_2 = atom2;
-#endif
+
+    // excludesForAtom was constructed to use row/column indices in the MPI
+    // version, and to use local IDs in the non-MPI version:
 
     for (vector<int>::iterator i = excludesForAtom[atom1].begin();
          i != excludesForAtom[atom1].end(); ++i) {
-      if ( (*i) == unique_id_2 ) return true;
+      if ( (*i) == atom2 ) return true;
     }
 
     return false;
```
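With the index translation gone, `excludeAtomPair` reduces to a plain membership test: `excludesForAtom[atom1]` already stores indices in the same space as `atom2` (row/column in MPI builds, local otherwise). The scan could equally be written with `std::find`; an equivalent sketch, not the committed code:

```cpp
#include <algorithm>
#include <vector>

// Membership test equivalent to the loop above; 'excludes' stands in
// for excludesForAtom[atom1].
bool isExcluded(const std::vector<int>& excludes, int atom2) {
  return std::find(excludes.begin(), excludes.end(), atom2) != excludes.end();
}
```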
```diff
@@ -1185 +1194 @@ namespace OpenMD {
         }
       }
 #else
-
       for (vector<int>::iterator j1 = cellList_[m1].begin();
            j1 != cellList_[m1].end(); ++j1) {
         for (vector<int>::iterator j2 = cellList_[m2].begin();
              j2 != cellList_[m2].end(); ++j2) {
-          
+
           // Always do this if we're in different cells or if
-          // we're in the same cell and the global index of the
-          // j2 cutoff group is less than the j1 cutoff group
-          
-          if (m2 != m1 || (*j2) < (*j1)) {
+          // we're in the same cell and the global index of
+          // the j2 cutoff group is greater than or equal to
+          // the j1 cutoff group.  Note that Rappaport's code
+          // has a "less than" conditional here, but that
+          // deals with atom-by-atom computation.  OpenMD
+          // allows atoms within a single cutoff group to
+          // interact with each other.
+
+
+
+          if (m2 != m1 || (*j2) >= (*j1) ) {
+
             dr = snap_->cgData.position[(*j2)] - snap_->cgData.position[(*j1)];
             snap_->wrapVector(dr);
             cuts = getGroupCutoffs( (*j1), (*j2) );
```
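The flipped comparison changes which same-cell pairs the loop emits. A toy enumeration (not OpenMD code) for one cell holding hypothetical groups 4, 7, and 9 shows that the `>=` test yields each distinct pair exactly once, plus the self pairs that let atoms within a single cutoff group interact:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> cell = {4, 7, 9};  // hypothetical group indices in one cell
  for (int j1 : cell)
    for (int j2 : cell)
      if (j2 >= j1)                   // the new test; includes (j, j)
        std::printf("(%d,%d) ", j1, j2);
  std::printf("\n");  // prints: (4,4) (4,7) (4,9) (7,7) (7,9) (9,9)
  return 0;
}
```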
```diff
@@ -1214 +1230 @@ namespace OpenMD {
       // branch to do all cutoff group pairs
 #ifdef IS_MPI
       for (int j1 = 0; j1 < nGroupsInRow_; j1++) {
-        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {  
+        for (int j2 = 0; j2 < nGroupsInCol_; j2++) {
           dr = cgColData.position[j2] - cgRowData.position[j1];
           snap_->wrapVector(dr);
           cuts = getGroupCutoffs( j1, j2 );
           if (dr.lengthSquare() < cuts.third) {
             neighborList.push_back(make_pair(j1, j2));
           }
         }
-      }      
+      }
 #else
-      for (int j1 = 0; j1 < nGroups_ - 1; j1++) {
-        for (int j2 = j1 + 1; j2 < nGroups_; j2++) {
+      // include all groups here.
+      for (int j1 = 0; j1 < nGroups_; j1++) {
+        // include self group interactions j2 == j1
+        for (int j2 = j1; j2 < nGroups_; j2++) {
           dr = snap_->cgData.position[j2] - snap_->cgData.position[j1];
           snap_->wrapVector(dr);
           cuts = getGroupCutoffs( j1, j2 );
           if (dr.lengthSquare() < cuts.third) {
             neighborList.push_back(make_pair(j1, j2));
           }
-        }
-      }
+        }
+      }
 #endif
     }
 
```
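The serial all-pairs branch gets the matching change: `j2` now starts at `j1` rather than `j1 + 1`, so for `nGroups_` = N the candidate list grows from N(N-1)/2 to N(N+1)/2 entries, the extra N being the self pairs. A quick check of that count (illustrative only; N is an arbitrary stand-in for `nGroups_`):

```cpp
#include <cassert>

int main() {
  const long N = 1000;                     // stand-in for nGroups_
  const long old_pairs = N * (N - 1) / 2;  // old loop: j2 = j1 + 1 .. N-1
  const long new_pairs = N * (N + 1) / 2;  // new loop: j2 = j1     .. N-1
  assert(new_pairs - old_pairs == N);      // exactly one self pair per group
  return 0;
}
```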