-/**
- * @file ForceDecomposition.cpp
- * @author Charles Vardeman <cvardema.at.nd.edu>
- * @date 08/18/2010
- * @time 11:56am
- * @version 1.0
+/*
+ * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
  *
- * @section LICENSE
- * Copyright (c) 2010 The University of Notre Dame. All Rights Reserved.
- *
  * The University of Notre Dame grants you ("Licensee") a
  * non-exclusive, royalty free, license to use, modify and
  * redistribute this software in source and binary code form, provided
...
  * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
  * [4] Vardeman & Gezelter, in progress (2009).
  */
+#include "parallel/ForceMatrixDecomposition.hpp"
+#include "math/SquareMatrix3.hpp"
+#include "nonbonded/NonBondedInteraction.hpp"
+#include "brains/SnapshotManager.hpp"

+using namespace std;
+namespace OpenMD {

+  /**
+   * distributeInitialData is essentially a copy of the older Fortran
+   * SimulationSetup
+   */
+
+  void ForceMatrixDecomposition::distributeInitialData() {
+    snap_ = sman_->getCurrentSnapshot();
+    storageLayout_ = sman_->getStorageLayout();
+#ifdef IS_MPI
+    int nLocal = snap_->getNumberOfAtoms();
+    int nGroups = snap_->getNumberOfCutoffGroups();
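+
+    // Each processor owns one block of the pairwise force matrix; the Row
+    // and Column Communicator objects below gather and scatter per-atom and
+    // per-cutoff-group data along this processor's row and column of the
+    // processor grid.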
+    AtomCommIntRow = new Communicator<Row,int>(nLocal);
+    AtomCommRealRow = new Communicator<Row,RealType>(nLocal);
+    AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal);
+    AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal);

-/* -*- c++ -*- */
-#include "config.h"
-#include <stdlib.h>
+    AtomCommIntColumn = new Communicator<Column,int>(nLocal);
+    AtomCommRealColumn = new Communicator<Column,RealType>(nLocal);
+    AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal);
+    AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal);
+
+    cgCommIntRow = new Communicator<Row,int>(nGroups);
+    cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups);
+    cgCommIntColumn = new Communicator<Column,int>(nGroups);
+    cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups);
+
+    int nAtomsInRow = AtomCommIntRow->getSize();
+    int nAtomsInCol = AtomCommIntColumn->getSize();
+    int nGroupsInRow = cgCommIntRow->getSize();
+    int nGroupsInCol = cgCommIntColumn->getSize();
+
+    // Modify the data storage objects with the correct layouts and sizes:
+    atomRowData.resize(nAtomsInRow);
+    atomRowData.setStorageLayout(storageLayout_);
+    atomColData.resize(nAtomsInCol);
+    atomColData.setStorageLayout(storageLayout_);
+    cgRowData.resize(nGroupsInRow);
+    cgRowData.setStorageLayout(DataStorage::dslPosition);
+    cgColData.resize(nGroupsInCol);
+    cgColData.setStorageLayout(DataStorage::dslPosition);
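+
+    // Per-interaction-family potential accumulators, sized for the atoms in
+    // this processor's row and column, plus a local per-family accumulator.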
+    vector<vector<RealType> > pot_row(N_INTERACTION_FAMILIES,
+                                      vector<RealType> (nAtomsInRow, 0.0));
+    vector<vector<RealType> > pot_col(N_INTERACTION_FAMILIES,
+                                      vector<RealType> (nAtomsInCol, 0.0));
+
+    vector<RealType> pot_local(N_INTERACTION_FAMILIES, 0.0);
+
+    // gather the information for atomtype IDs (atids):
+    vector<int> identsLocal = info_->getIdentArray();
+    identsRow.reserve(nAtomsInRow);
+    identsCol.reserve(nAtomsInCol);
+
+    AtomCommIntRow->gather(identsLocal, identsRow);
+    AtomCommIntColumn->gather(identsLocal, identsCol);
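+
+    // Gather the local-to-global index maps so that row and column entries
+    // can be traced back to global atom and cutoff-group indices.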
+    AtomLocalToGlobal = info_->getGlobalAtomIndices();
+    AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal);
+    AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal);
+
+    cgLocalToGlobal = info_->getGlobalGroupIndices();
+    cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal);
+    cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal);
+
+    // still need:
+    // topoDist
+    // exclude
+#endif
+  }
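+
+  /**
+   * distributeData pushes the current snapshot's atomic and cutoff-group
+   * positions (and, when the storage layout includes them, the rotation
+   * matrices and electrostatic frames) out to the row and column arrays.
+   */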
+  void ForceMatrixDecomposition::distributeData() {
+    snap_ = sman_->getCurrentSnapshot();
+    storageLayout_ = sman_->getStorageLayout();
 #ifdef IS_MPI
-#include <mpi.h>
+
+    // gather up the atomic positions
+    AtomCommVectorRow->gather(snap_->atomData.position,
+                              atomRowData.position);
+    AtomCommVectorColumn->gather(snap_->atomData.position,
+                                 atomColData.position);
+
+    // gather up the cutoff group positions
+    cgCommVectorRow->gather(snap_->cgData.position,
+                            cgRowData.position);
+    cgCommVectorColumn->gather(snap_->cgData.position,
+                               cgColData.position);
+
+    // if needed, gather the atomic rotation matrices
+    if (storageLayout_ & DataStorage::dslAmat) {
+      AtomCommMatrixRow->gather(snap_->atomData.aMat,
+                                atomRowData.aMat);
+      AtomCommMatrixColumn->gather(snap_->atomData.aMat,
+                                   atomColData.aMat);
+    }
+
+    // if needed, gather the atomic electrostatic frames
+    if (storageLayout_ & DataStorage::dslElectroFrame) {
+      AtomCommMatrixRow->gather(snap_->atomData.electroFrame,
+                                atomRowData.electroFrame);
+      AtomCommMatrixColumn->gather(snap_->atomData.electroFrame,
+                                   atomColData.electroFrame);
+    }
+#endif
+  }
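+
+  /**
+   * collectIntermediateData sums the partial particle densities from the
+   * row and column arrays back onto the locally owned atoms.  It only does
+   * work when the storage layout includes density.
+   */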
+  void ForceMatrixDecomposition::collectIntermediateData() {
+    snap_ = sman_->getCurrentSnapshot();
+    storageLayout_ = sman_->getStorageLayout();
+#ifdef IS_MPI
+
+    if (storageLayout_ & DataStorage::dslDensity) {
+
+      AtomCommRealRow->scatter(atomRowData.density,
+                               snap_->atomData.density);
+
+      int n = snap_->atomData.density.size();
+      std::vector<RealType> rho_tmp(n, 0.0);
+      AtomCommRealColumn->scatter(atomColData.density, rho_tmp);
+      for (int i = 0; i < n; i++)
+        snap_->atomData.density[i] += rho_tmp[i];
+    }
 #endif
+  }
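+
+  /**
+   * distributeIntermediateData pushes the locally computed functional and
+   * functional derivative values back out to the row and column arrays,
+   * when the storage layout includes them.
+   */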
+  void ForceMatrixDecomposition::distributeIntermediateData() {
+    snap_ = sman_->getCurrentSnapshot();
+    storageLayout_ = sman_->getStorageLayout();
+#ifdef IS_MPI
+    if (storageLayout_ & DataStorage::dslFunctional) {
+      AtomCommRealRow->gather(snap_->atomData.functional,
+                              atomRowData.functional);
+      AtomCommRealColumn->gather(snap_->atomData.functional,
+                                 atomColData.functional);
+    }
+
+    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
+      AtomCommRealRow->gather(snap_->atomData.functionalDerivative,
+                              atomRowData.functionalDerivative);
+      AtomCommRealColumn->gather(snap_->atomData.functionalDerivative,
+                                 atomColData.functionalDerivative);
+    }
+#endif
+  }
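+
+  /**
+   * collectData scatters the row and column force (and, if present, torque)
+   * contributions back onto the locally owned atoms and accumulates the
+   * per-family pair potentials.
+   */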
+  void ForceMatrixDecomposition::collectData() {
+    snap_ = sman_->getCurrentSnapshot();
+    storageLayout_ = sman_->getStorageLayout();
+#ifdef IS_MPI
+    int n = snap_->atomData.force.size();
+    vector<Vector3d> frc_tmp(n, V3Zero);
+
+    AtomCommVectorRow->scatter(atomRowData.force, frc_tmp);
+    for (int i = 0; i < n; i++) {
+      snap_->atomData.force[i] += frc_tmp[i];
+      frc_tmp[i] = 0.0;
+    }
+
+    AtomCommVectorColumn->scatter(atomColData.force, frc_tmp);
+    for (int i = 0; i < n; i++)
+      snap_->atomData.force[i] += frc_tmp[i];
+
+    if (storageLayout_ & DataStorage::dslTorque) {

-#include <iostream>
-#include <vector>
-#include <algorithm>
-#include <cmath>
-#include "parallel/ForceDecomposition.hpp"
+      int nt = snap_->atomData.torque.size();
+      vector<Vector3d> trq_tmp(nt, V3Zero);

+      AtomCommVectorRow->scatter(atomRowData.torque, trq_tmp);
+      for (int i = 0; i < n; i++) {
+        snap_->atomData.torque[i] += trq_tmp[i];
+        trq_tmp[i] = 0.0;
+      }
+
+      AtomCommVectorColumn->scatter(atomColData.torque, trq_tmp);
+      for (int i = 0; i < n; i++)
+        snap_->atomData.torque[i] += trq_tmp[i];
+    }
+
+    int nLocal = snap_->getNumberOfAtoms();

-using namespace std;
-using namespace OpenMD;
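+
+    // Scatter the row-accumulated pair potentials back to the local atoms
+    // and fold them into the per-family potential on this processor.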
+    vector<vector<RealType> > pot_temp(N_INTERACTION_FAMILIES,
+                                       vector<RealType> (nLocal, 0.0));
+
+    for (int i = 0; i < N_INTERACTION_FAMILIES; i++) {
+      AtomCommRealRow->scatter(pot_row[i], pot_temp[i]);
+      for (int ii = 0; ii < pot_temp[i].size(); ii++ ) {
+        pot_local[i] += pot_temp[i][ii];
+      }
+    }
+#endif
+  }

-//__static
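+
+  // The following helpers compute pair separation vectors.  Under MPI the
+  // vectors are taken between a row entry and a column entry of the
+  // decomposition; otherwise they come straight from the local snapshot.
+  // Each result is wrapped back into the periodic box via snap_->wrapVector().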
+  Vector3d ForceMatrixDecomposition::getIntergroupVector(int cg1, int cg2){
+    Vector3d d;
+
 #ifdef IS_MPI
-static vector<MPI:Comm> communictors;
+    d = cgColData.position[cg2] - cgRowData.position[cg1];
+#else
+    d = snap_->cgData.position[cg2] - snap_->cgData.position[cg1];
 #endif
+
+    snap_->wrapVector(d);
+    return d;
+  }

-//____ MPITypeTraits
-template<typename T>
-struct MPITypeTraits;

+  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorRow(int atom1, int cg1){
+
+    Vector3d d;
+
 #ifdef IS_MPI
-template<>
-struct MPITypeTraits<RealType> {
-  static const MPI::Datatype datatype;
-};
-const MPI_Datatype MPITypeTraits<RealType>::datatype = MY_MPI_REAL;
+    d = cgRowData.position[cg1] - atomRowData.position[atom1];
+#else
+    d = snap_->cgData.position[cg1] - snap_->atomData.position[atom1];
+#endif

-template<>
-struct MPITypeTraits<int> {
-  static const MPI::Datatype datatype;
-};
-const MPI::Datatype MPITypeTraits<int>::datatype = MPI_INT;
+    snap_->wrapVector(d);
+    return d;
+  }
+
+  Vector3d ForceMatrixDecomposition::getAtomToGroupVectorColumn(int atom2, int cg2){
+    Vector3d d;
+
+#ifdef IS_MPI
+    d = cgColData.position[cg2] - atomColData.position[atom2];
+#else
+    d = snap_->cgData.position[cg2] - snap_->atomData.position[atom2];
 #endif
+
+    snap_->wrapVector(d);
+    return d;
+  }
+
+  Vector3d ForceMatrixDecomposition::getInteratomicVector(int atom1, int atom2){
+    Vector3d d;
+
+#ifdef IS_MPI
+    d = atomColData.position[atom2] - atomRowData.position[atom1];
+#else
+    d = snap_->atomData.position[atom2] - snap_->atomData.position[atom1];
+#endif

-/**
- * Constructor for ForceDecomposition Parallel Decomposition Method
- * Will try to construct a symmetric grid of processors. Ideally, the
- * number of processors will be a square ex: 4, 9, 16, 25.
- *
- */
+    snap_->wrapVector(d);
+    return d;
+  }

-ForceDecomposition::ForceDecomposition() {
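+
+  // Pairwise forces are accumulated into the row or column arrays under MPI
+  // (and scattered back to the local atoms later in collectData); in the
+  // serial case they are added directly to the snapshot's force array.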
+  void ForceMatrixDecomposition::addForceToAtomRow(int atom1, Vector3d fg){
+#ifdef IS_MPI
+    atomRowData.force[atom1] += fg;
+#else
+    snap_->atomData.force[atom1] += fg;
+#endif
+  }

+  void ForceMatrixDecomposition::addForceToAtomColumn(int atom2, Vector3d fg){
 #ifdef IS_MPI
-  int nProcs = MPI::COMM_WORLD.Get_size();
-  int worldRank = MPI::COMM_WORLD.Get_rank();
+    atomColData.force[atom2] += fg;
+#else
+    snap_->atomData.force[atom2] += fg;
 #endif

-  // First time through, construct column stride.
-  if (communicators.size() == 0)
-  {
-    int nColumnsMax = (int) round(sqrt((float) nProcs));
-    for (int i = 0; i < nProcs; ++i)
-    {
-      if (nProcs%i==0) nColumns=i;
-    }
-
-    int nRows = nProcs/nColumns;
-    myRank_ = (int) worldRank%nColumns;
  }
-  else
-  {
-    myRank_ = myRank/nColumns;
-  }
-  MPI::Comm newComm = MPI:COMM_WORLD.Split(myRank_,0);
-
-  isColumn_ = false;
-
-}

-ForceDecomposition::gather(sendbuf, receivebuf){
-  communicators(myIndex_).Allgatherv();
-}
+  // filling interaction blocks with pointers
+  InteractionData ForceMatrixDecomposition::fillInteractionData(int atom1, int atom2) {

+    InteractionData idat;
+#ifdef IS_MPI
+    if (storageLayout_ & DataStorage::dslAmat) {
+      idat.A1 = atomRowData.aMat[atom1];
+      idat.A2 = atomColData.aMat[atom2];
+    }

+    if (storageLayout_ & DataStorage::dslElectroFrame) {
+      idat.eFrame1 = atomRowData.electroFrame[atom1];
+      idat.eFrame2 = atomColData.electroFrame[atom2];
+    }

-ForceDecomposition::scatter(sbuffer, rbuffer){
-  communicators(myIndex_).Reduce_scatter(sbuffer, recevbuf. recvcounts, MPI::DOUBLE, MPI::SUM);
-}
+    if (storageLayout_ & DataStorage::dslTorque) {
+      idat.t1 = atomRowData.torque[atom1];
+      idat.t2 = atomColData.torque[atom2];
+    }

+    if (storageLayout_ & DataStorage::dslDensity) {
+      idat.rho1 = atomRowData.density[atom1];
+      idat.rho2 = atomColData.density[atom2];
+    }

+    if (storageLayout_ & DataStorage::dslFunctionalDerivative) {
+      idat.dfrho1 = atomRowData.functionalDerivative[atom1];
+      idat.dfrho2 = atomColData.functionalDerivative[atom2];
+    }
+#endif
+
+    return idat;
+  }
+  InteractionData ForceMatrixDecomposition::fillSkipData(int atom1, int atom2){
+  }
+  SelfData ForceMatrixDecomposition::fillSelfData(int atom1) {
+  }
+
+} //end namespace OpenMD