/*
 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
 *
 * The University of Notre Dame grants you ("Licensee") a
 * non-exclusive, royalty free, license to use, modify and
 * redistribute this software in source and binary code form, provided
 * that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * This software is provided "AS IS," without a warranty of any
 * kind. All express or implied conditions, representations and
 * warranties, including any implied warranty of merchantability,
 * fitness for a particular purpose or non-infringement, are hereby
 * excluded.  The University of Notre Dame and its licensors shall not
 * be liable for any damages suffered by licensee as a result of
 * using, modifying or distributing the software or its
 * derivatives. In no event will the University of Notre Dame or its
 * licensors be liable for any lost revenue, profit or data, or for
 * direct, indirect, special, consequential, incidental or punitive
 * damages, however caused and regardless of the theory of liability,
 * arising out of the use of or inability to use software, even if the
 * University of Notre Dame has been advised of the possibility of
 * such damages.
 *
 * SUPPORT OPEN SCIENCE!  If you use OpenMD or its source code in your
 * research, please cite the appropriate papers when you publish your
 * work.  Good starting points are:
 *
 * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 * [4]  Vardeman & Gezelter, in progress (2009).
 */
#include "parallel/ForceMatrixDecomposition.hpp"
#include "math/SquareMatrix3.hpp"
#include "nonbonded/NonBondedInteraction.hpp"
#include "brains/SnapshotManager.hpp"

using namespace std;
namespace OpenMD {

  /**
   * distributeInitialData is essentially a copy of the older Fortran
   * SimulationSetup
   */

  void ForceMatrixDecomposition::distributeInitialData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI
    int nLocal = snap_->getNumberOfAtoms();
    int nGroups = snap_->getNumberOfCutoffGroups();

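    // In the force-matrix decomposition, each processor owns one block of
    // the (row, column) interaction matrix, so local data must be gathered
    // both along this processor's row and along its column.  The
    // Communicator objects below plan those gather / scatter operations for
    // each data type that may need to be exchanged.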
    AtomCommIntRow = new Communicator<Row, int>(nLocal);
    AtomCommRealRow = new Communicator<Row, RealType>(nLocal);
    AtomCommVectorRow = new Communicator<Row, Vector3d>(nLocal);
    AtomCommMatrixRow = new Communicator<Row, Mat3x3d>(nLocal);

    AtomCommIntColumn = new Communicator<Column, int>(nLocal);
    AtomCommRealColumn = new Communicator<Column, RealType>(nLocal);
    AtomCommVectorColumn = new Communicator<Column, Vector3d>(nLocal);
    AtomCommMatrixColumn = new Communicator<Column, Mat3x3d>(nLocal);

    cgCommIntRow = new Communicator<Row, int>(nGroups);
    cgCommVectorRow = new Communicator<Row, Vector3d>(nGroups);
    cgCommIntColumn = new Communicator<Column, int>(nGroups);
    cgCommVectorColumn = new Communicator<Column, Vector3d>(nGroups);

    int nAtomsInRow = AtomCommIntRow->getSize();
    int nAtomsInCol = AtomCommIntColumn->getSize();
    int nGroupsInRow = cgCommIntRow->getSize();
    int nGroupsInCol = cgCommIntColumn->getSize();

    // Modify the data storage objects with the correct layouts and sizes:
    atomRowData.resize(nAtomsInRow);
    atomRowData.setStorageLayout(storageLayout_);
    atomColData.resize(nAtomsInCol);
    atomColData.setStorageLayout(storageLayout_);
    cgRowData.resize(nGroupsInRow);
    cgRowData.setStorageLayout(DataStorage::dslPosition);
    cgColData.resize(nGroupsInCol);
    cgColData.setStorageLayout(DataStorage::dslPosition);

    vector<vector<RealType> > pot_row(N_INTERACTION_FAMILIES,
                                      vector<RealType>(nAtomsInRow, 0.0));
    vector<vector<RealType> > pot_col(N_INTERACTION_FAMILIES,
                                      vector<RealType>(nAtomsInCol, 0.0));

    vector<RealType> pot_local(N_INTERACTION_FAMILIES, 0.0);

    // gather the information for atomtype IDs (atids):
    vector<int> identsLocal = info_->getIdentArray();
    identsRow.reserve(nAtomsInRow);
    identsCol.reserve(nAtomsInCol);

    AtomCommIntRow->gather(identsLocal, identsRow);
    AtomCommIntColumn->gather(identsLocal, identsCol);

    AtomLocalToGlobal = info_->getGlobalAtomIndices();
    AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
    AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);

    cgLocalToGlobal = info_->getGlobalGroupIndices();
    cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
    cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);

    // still need:
    // topoDist
    // exclude
#endif
  }

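  /**
   * distributeData gathers the per-frame data (atomic and cutoff group
   * positions, and, when present in the storage layout, rotation matrices
   * and electrostatic frames) from local storage into the row- and
   * column-indexed storage blocks before the force loop.
   */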
  void ForceMatrixDecomposition::distributeData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI

    // gather up the atomic positions
    AtomCommVectorRow->gather(snap_->atomData.position,
                              atomRowData.position);
    AtomCommVectorColumn->gather(snap_->atomData.position,
                                 atomColData.position);

    // gather up the cutoff group positions
    cgCommVectorRow->gather(snap_->cgData.position,
                            cgRowData.position);
    cgCommVectorColumn->gather(snap_->cgData.position,
                               cgColData.position);

    // if needed, gather the atomic rotation matrices
    if (storageLayout_ & DataStorage::dslAmat) {
      AtomCommMatrixRow->gather(snap_->atomData.aMat,
                                atomRowData.aMat);
      AtomCommMatrixColumn->gather(snap_->atomData.aMat,
                                   atomColData.aMat);
    }

    // if needed, gather the atomic electrostatic frames
    if (storageLayout_ & DataStorage::dslElectroFrame) {
      AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
                                atomRowData.electroFrame);
      AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
                                   atomColData.electroFrame);
    }
#endif
  }

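  /**
   * collectIntermediateData scatters the row- and column-accumulated atomic
   * densities back onto the local atoms and sums the two contributions.
   * This intermediate pass is only needed when the storage layout carries
   * densities (e.g. for density-dependent, EAM-like interactions).
   */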
  void ForceMatrixDecomposition::collectIntermediateData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI

    if (storageLayout_ & DataStorage::dslDensity) {

      AtomCommRealRow->scatter(atomRowData.density,
                               snap_->atomData.density);

      int n = snap_->atomData.density.size();
      std::vector<RealType> rho_tmp(n, 0.0);
      AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
      for (int i = 0; i < n; i++)
        snap_->atomData.density[i] += rho_tmp[i];
    }
#endif
  }

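  /**
   * distributeIntermediateData pushes the locally computed functionals and
   * functional derivatives back out to the row and column storage so the
   * second half of the force loop can use them.
   */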
  void ForceMatrixDecomposition::distributeIntermediateData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI
    if (storageLayout_ & DataStorage::dslFunctional) {
      AtomCommRealRow->gather(snap_->atomData.functional,
                              atomRowData.functional);
      AtomCommRealColumn->gather(snap_->atomData.functional,
                                 atomColData.functional);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
                              atomRowData.functionalDerivative);
      AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
                                 atomColData.functionalDerivative);
    }
#endif
  }

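  /**
   * collectData scatters the row- and column-accumulated forces (and
   * torques, when present) back onto the local atoms and reduces the
   * per-family pair potentials.  Note: this assumes pot_row and pot_local
   * persist between calls (i.e. that they are class members rather than
   * the locals created in distributeInitialData).
   */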
  void ForceMatrixDecomposition::collectData() {
    snap_ = sman_->getCurrentSnapshot();
    storageLayout_ = sman_->getStorageLayout();
#ifdef IS_MPI
    int n = snap_->atomData.force.size();
    vector<Vector3d> frc_tmp(n, V3Zero);

    AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
    for (int i = 0; i < n; i++) {
      snap_->atomData.force[i] += frc_tmp[i];
      frc_tmp[i] = 0.0;
    }

    AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
    for (int i = 0; i < n; i++)
      snap_->atomData.force[i] += frc_tmp[i];


    if (storageLayout_ & DataStorage::dslTorque) {

      int nt = snap_->atomData.torque.size();
      vector<Vector3d> trq_tmp(nt, V3Zero);

      AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
      for (int i = 0; i < nt; i++) {
        snap_->atomData.torque[i] += trq_tmp[i];
        trq_tmp[i] = 0.0;
      }

      AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
      for (int i = 0; i < nt; i++)
        snap_->atomData.torque[i] += trq_tmp[i];
    }

    int nLocal = snap_->getNumberOfAtoms();

    vector<vector<RealType> > pot_temp(N_INTERACTION_FAMILIES,
                                       vector<RealType>(nLocal, 0.0));

    for (int i = 0; i < N_INTERACTION_FAMILIES; i++) {
      AtomCommRealRow->scatter(pot_row[i], pot_temp[i]);
      for (unsigned int ii = 0; ii < pot_temp[i].size(); ii++) {
        pot_local[i] += pot_temp[i][ii];
      }
    }
#endif
  }

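  /**
   * The following helpers return minimum-image separation vectors.  Under
   * MPI the first index refers to row-indexed storage and the second to
   * column-indexed storage; in serial builds both indices refer to the
   * local snapshot arrays.
   */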
  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2) {
    Vector3d d;

#ifdef IS_MPI
    d = cgColData.position[cg2] - cgRowData.position[cg1];
#else
    d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
#endif

    snap_->wrapVector(d);
    return d;
  }

  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1) {
    Vector3d d;

#ifdef IS_MPI
    d = cgRowData.position[cg1] - atomRowData.position[atom1];
#else
    d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
#endif

    snap_->wrapVector(d);
    return d;
  }

  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2) {
    Vector3d d;

#ifdef IS_MPI
    d = cgColData.position[cg2] - atomColData.position[atom2];
#else
    d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
#endif

    snap_->wrapVector(d);
    return d;
  }

  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2) {
    Vector3d d;

#ifdef IS_MPI
    d = atomColData.position[atom2] - atomRowData.position[atom1];
#else
    d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
#endif

    snap_->wrapVector(d);
    return d;
  }

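  /**
   * Pair forces are accumulated into row or column storage under MPI (to
   * be scattered and summed later in collectData) and directly into the
   * local snapshot otherwise.
   */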
  void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg) {
#ifdef IS_MPI
    atomRowData.force[atom1] += fg;
#else
    snap_->atomData.force[atom1] += fg;
#endif
  }

  void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg) {
#ifdef IS_MPI
    atomColData.force[atom2] += fg;
#else
    snap_->atomData.force[atom2] += fg;
#endif
  }

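  /**
   * fillInteractionData hands the non-bonded interaction routines pointers
   * into atomic storage rather than copies, so that anything they modify
   * (torques, densities, etc.) lands directly in the row / column arrays.
   * Which pointers are set depends on the bits present in storageLayout_.
   */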
  // filling interaction blocks with pointers
  InteractionData ForceMatrixDecomposition::fillInteractionData(int atom1, int atom2) {

    InteractionData idat;
#ifdef IS_MPI
    if (storageLayout_ & DataStorage::dslAmat) {
      idat.A1 = &(atomRowData.aMat[atom1]);
      idat.A2 = &(atomColData.aMat[atom2]);
    }

    if (storageLayout_ & DataStorage::dslElectroFrame) {
      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
    }

    if (storageLayout_ & DataStorage::dslTorque) {
      idat.t1 = &(atomRowData.torque[atom1]);
      idat.t2 = &(atomColData.torque[atom2]);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      idat.rho1 = &(atomRowData.density[atom1]);
      idat.rho2 = &(atomColData.density[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      idat.dfrho1 = &(atomRowData.functionalDerivative[atom1]);
      idat.dfrho2 = &(atomColData.functionalDerivative[atom2]);
    }
#else
    if (storageLayout_ & DataStorage::dslAmat) {
      idat.A1 = &(snap_->atomData.aMat[atom1]);
      idat.A2 = &(snap_->atomData.aMat[atom2]);
    }

    if (storageLayout_ & DataStorage::dslElectroFrame) {
      idat.eFrame1 = &(snap_->atomData.electroFrame[atom1]);
      idat.eFrame2 = &(snap_->atomData.electroFrame[atom2]);
    }

    if (storageLayout_ & DataStorage::dslTorque) {
      idat.t1 = &(snap_->atomData.torque[atom1]);
      idat.t2 = &(snap_->atomData.torque[atom2]);
    }

    if (storageLayout_ & DataStorage::dslDensity) {
      idat.rho1 = &(snap_->atomData.density[atom1]);
      idat.rho2 = &(snap_->atomData.density[atom2]);
    }

    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
      idat.dfrho1 = &(snap_->atomData.functionalDerivative[atom1]);
      idat.dfrho2 = &(snap_->atomData.functionalDerivative[atom2]);
    }
#endif
    return idat;
  }

  InteractionData ForceMatrixDecomposition::fillSkipData(int atom1, int atom2) {
    InteractionData idat;
    // still need:
    //   skippedCharge1, skippedCharge2, rij, d, electroMult, sw, f
#ifdef IS_MPI

    if (storageLayout_ & DataStorage::dslElectroFrame) {
      idat.eFrame1 = &(atomRowData.electroFrame[atom1]);
      idat.eFrame2 = &(atomColData.electroFrame[atom2]);
    }
    if (storageLayout_ & DataStorage::dslTorque) {
      idat.t1 = &(atomRowData.torque[atom1]);
      idat.t2 = &(atomColData.torque[atom2]);
    }
#endif

    return idat;
  }

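  /**
   * fillSelfData is intended to package the single-atom (self) terms for
   * the interaction loop; it is currently an empty stub.
   */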
  SelfData ForceMatrixDecomposition::fillSelfData(int atom1) {
    SelfData sdat;
    // not yet filled in; return an empty SelfData for now
    return sdat;
  }

  /*
   * buildNeighborList
   *
   * first element of pair is row-indexed CutoffGroup
   * second element of pair is column-indexed CutoffGroup
   */
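  /*
   * Note: the body below is still a work in progress.  It sketches a
   * standard linked-cell / Verlet-list construction, but it mixes OpenMD
   * types with Rapaport-style helpers (VDiv, VSAdd, VMul, VLinear) and
   * with variables (mol, nMol, cells, region, cellList, nebrTabLen,
   * nGroupsInRow) that are not yet defined in this scope.
   */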
  vector<pair<int, int> > ForceMatrixDecomposition::buildNeighborList() {
    Vector3d dr, invWid, rs, shift;
    Vector3i cc, m1v, m2v;
    int c, i, j1, j2, m1, m1x, m1y, m1z, m2, n, offset;

    vector<pair<int, int> > neighborList;
    Vector3i nCells;

    rList_ = (rCut_ + skinThickness_);
    RealType rl2 = rList_ * rList_;

    snap_ = sman_->getCurrentSnapshot();
    Mat3x3d Hmat = snap_->getHmat();
    Vector3d Hx = Hmat.getColumn(0);
    Vector3d Hy = Hmat.getColumn(1);
    Vector3d Hz = Hmat.getColumn(2);

    nCells.x() = (int) ( Hx.length() / rList_ );
    nCells.y() = (int) ( Hy.length() / rList_ );
    nCells.z() = (int) ( Hz.length() / rList_ );

    for (i = 0; i < nGroupsInRow; i++) {
      rs = cgRowData.position[i];
      snap_->scaleVector(rs);
    }

    VDiv (invWid, cells, region);
    for (n = nMol; n < nMol + cells.componentProduct(); n++) cellList[n] = -1;
    for (n = 0; n < nMol; n++) {
      VSAdd (rs, mol[n].r, 0.5, region);
      VMul (cc, rs, invWid);
      c = VLinear (cc, cells) + nMol;
      cellList[n] = cellList[c];
      cellList[c] = n;
    }
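    // With the linked-cell list built, loop over each cell and a fixed set
    // of neighboring-cell offsets; pairs of groups closer than rList_ are
    // added to the neighbor list.  Periodic wrapping is handled by shifting
    // cells that fall off the +x / -x and +y / -y edges of the box.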
    nebrTabLen = 0;
    for (m1z = 0; m1z < cells.z(); m1z++) {
      for (m1y = 0; m1y < cells.y(); m1y++) {
        for (m1x = 0; m1x < cells.x(); m1x++) {
          m1v = Vector3i(m1x, m1y, m1z);
          m1 = VLinear(m1v, cells) + nMol;
          for (offset = 0; offset < nOffset_; offset++) {
            m2v = m1v + cellOffsets_[offset];
            shift = V3Zero;

            if (m2v.x() >= cells.x()) {
              m2v.x() = 0;
              shift.x() = region.x();
            } else if (m2v.x() < 0) {
              m2v.x() = cells.x() - 1;
              shift.x() = -region.x();
            }

            if (m2v.y() >= cells.y()) {
              m2v.y() = 0;
              shift.y() = region.y();
            } else if (m2v.y() < 0) {
              m2v.y() = cells.y() - 1;
              shift.y() = -region.y();
            }

            m2 = VLinear (m2v, cells) + nMol;
            for (j1 = cellList[m1]; j1 >= 0; j1 = cellList[j1]) {
              for (j2 = cellList[m2]; j2 >= 0; j2 = cellList[j2]) {
                if (m1 != m2 || j2 < j1) {
                  dr = mol[j1].r - mol[j2].r;
                  dr -= shift;
                  if (dr.lengthSquare() < rl2) {
                    neighborList.push_back(make_pair(j1, j2));
                  }
                }
              }
            }
          }
        }
      }
    }
    return neighborList;
  }

} //end namespace OpenMD