| 1 | < | /** | 
| 2 | < | * @file ForceDecomposition.cpp | 
| 3 | < | * @author Charles Vardeman <cvardema.at.nd.edu> | 
| 4 | < | * @date 08/18/2010 | 
| 5 | < | * @time 11:56am | 
| 6 | < | * @version 1.0 | 
| 1 | > | /* | 
| 2 | > | * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved. | 
| 3 |  | * | 
| 8 | – | * @section LICENSE | 
| 9 | – | * Copyright (c) 2010 The University of Notre Dame. All Rights Reserved. | 
| 10 | – | * | 
| 4 |  | * The University of Notre Dame grants you ("Licensee") a | 
| 5 |  | * non-exclusive, royalty free, license to use, modify and | 
| 6 |  | * redistribute this software in source and binary code form, provided | 
| 38 |  | * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008). | 
| 39 |  | * [4]  Vardeman & Gezelter, in progress (2009). | 
| 40 |  | */ | 
| 41 | + | #include "parallel/ForceMatrixDecomposition.hpp" | 
| 42 | + | #include "math/SquareMatrix3.hpp" | 
| 43 | + | #include "nonbonded/NonBondedInteraction.hpp" | 
| 44 | + | #include "brains/SnapshotManager.hpp" | 
| 45 |  |  | 
| 46 | + | using namespace std; | 
| 47 | + | namespace OpenMD { | 
| 48 |  |  | 
| 49 | + | /** | 
| 50 | + | * distributeInitialData is essentially a copy of the older fortran | 
| 51 | + | * SimulationSetup | 
| 52 | + | */ | 
| 53 | + |  | 
| 54 | + | void ForceMatrixDecomposition::distributeInitialData() { | 
| 55 | + | #ifdef IS_MPI | 
| 56 | + | Snapshot* snap = sman_->getCurrentSnapshot(); | 
| 57 | + | int nLocal = snap->getNumberOfAtoms(); | 
| 58 | + | int nGroups = snap->getNumberOfCutoffGroups(); | 
| 59 |  |  | 
| 60 | < | /*  -*- c++ -*-  */ | 
| 61 | < | #include "config.h" | 
| 62 | < | #include <stdlib.h> | 
| 63 | < | #ifdef IS_MPI | 
| 55 | < | #include <mpi.h> | 
| 56 | < | #endif | 
| 60 | > | AtomCommIntRow = new Communicator<Row,int>(nLocal); | 
| 61 | > | AtomCommRealRow = new Communicator<Row,RealType>(nLocal); | 
| 62 | > | AtomCommVectorRow = new Communicator<Row,Vector3d>(nLocal); | 
| 63 | > | AtomCommMatrixRow = new Communicator<Row,Mat3x3d>(nLocal); | 
| 64 |  |  | 
| 65 | < | #include <iostream> | 
| 66 | < | #include <vector> | 
| 67 | < | #include <algorithm> | 
| 68 | < | #include <cmath> | 
| 62 | < | #include "parallel/ForceDecomposition.hpp" | 
| 65 | > | AtomCommIntColumn = new Communicator<Column,int>(nLocal); | 
| 66 | > | AtomCommRealColumn = new Communicator<Column,RealType>(nLocal); | 
| 67 | > | AtomCommVectorColumn = new Communicator<Column,Vector3d>(nLocal); | 
| 68 | > | AtomCommMatrixColumn = new Communicator<Column,Mat3x3d>(nLocal); | 
| 69 |  |  | 
| 70 | + | cgCommIntRow = new Communicator<Row,int>(nGroups); | 
| 71 | + | cgCommVectorRow = new Communicator<Row,Vector3d>(nGroups); | 
| 72 | + | cgCommIntColumn = new Communicator<Column,int>(nGroups); | 
| 73 | + | cgCommVectorColumn = new Communicator<Column,Vector3d>(nGroups); | 
| 74 |  |  | 
| 75 | < | using namespace std; | 
| 76 | < | using namespace OpenMD; | 
| 75 | > | int nAtomsInRow = AtomCommIntRow->getSize(); | 
| 76 | > | int nAtomsInCol = AtomCommIntColumn->getSize(); | 
| 77 | > | int nGroupsInRow = cgCommIntRow->getSize(); | 
| 78 | > | int nGroupsInCol = cgCommIntColumn->getSize(); | 
| 79 | > |  | 
| 80 | > | vector<vector<RealType> > pot_row(N_INTERACTION_FAMILIES, | 
| 81 | > | vector<RealType> (nAtomsInRow, 0.0)); | 
| 82 | > | vector<vector<RealType> > pot_col(N_INTERACTION_FAMILIES, | 
| 83 | > | vector<RealType> (nAtomsInCol, 0.0)); | 
| 84 | > |  | 
| 85 | > | vector<RealType> pot_local(N_INTERACTION_FAMILIES, 0.0); | 
| 86 | > |  | 
| 87 | > | // gather the information for atomtype IDs (atids): | 
| 88 | > | vector<int> identsLocal = info_->getIdentArray(); | 
| 89 | > | identsRow.reserve(nAtomsInRow); | 
| 90 | > | identsCol.reserve(nAtomsInCol); | 
| 91 | > |  | 
| 92 | > | AtomCommIntRow->gather(identsLocal, identsRow); | 
| 93 | > | AtomCommIntColumn->gather(identsLocal, identsCol); | 
| 94 | > |  | 
| 95 | > | AtomLocalToGlobal = info_->getGlobalAtomIndices(); | 
| 96 | > | AtomCommIntRow->gather(AtomLocalToGlobal, AtomRowToGlobal); | 
| 97 | > | AtomCommIntColumn->gather(AtomLocalToGlobal, AtomColToGlobal); | 
| 98 | > |  | 
| 99 | > | cgLocalToGlobal = info_->getGlobalGroupIndices(); | 
| 100 | > | cgCommIntRow->gather(cgLocalToGlobal, cgRowToGlobal); | 
| 101 | > | cgCommIntColumn->gather(cgLocalToGlobal, cgColToGlobal); | 
| 102 |  |  | 
| 103 | < | //__static | 
| 104 | < | #ifdef IS_MPI | 
| 105 | < | static vector<MPI::Comm> communicators; | 
| 103 | > | // still need: | 
| 104 | > | // topoDist | 
| 105 | > | // exclude | 
| 106 |  | #endif | 
| 107 | + | } | 
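The row and column Communicator objects allocated above implement the force-matrix decomposition: the processors sit on an (ideally square) grid, as the removed constructor comment later in this changeset describes, and each processor handles the pairs formed between the atoms gathered along its row and along its column. Below is a minimal standalone sketch of that grid setup with raw MPI; makeRowColComms, rowComm and colComm are illustrative names, not OpenMD API, and the divisor search is only one reasonable way to pick the grid shape.

    #include <mpi.h>

    // Sketch: arrange nProcs ranks in a roughly square grid and split
    // MPI_COMM_WORLD into one communicator per grid row and per grid column.
    void makeRowColComms(MPI_Comm world, MPI_Comm* rowComm, MPI_Comm* colComm) {
      int nProcs, rank;
      MPI_Comm_size(world, &nProcs);
      MPI_Comm_rank(world, &rank);

      // largest divisor of nProcs not exceeding sqrt(nProcs), so the grid is
      // as square as possible (4 -> 2x2, 12 -> 3x4, 16 -> 4x4, 7 -> 1x7)
      int nColumns = 1;
      for (int i = 1; i * i <= nProcs; ++i)
        if (nProcs % i == 0) nColumns = i;

      int myRow = rank / nColumns;
      int myCol = rank % nColumns;

      // ranks sharing a row index join the same row communicator; the key
      // argument only fixes the rank ordering inside each new communicator
      MPI_Comm_split(world, myRow, myCol, rowComm);
      MPI_Comm_split(world, myCol, myRow, colComm);
    }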
| 108 | + |  | 
| 109 |  |  | 
| 73 | – | //____ MPITypeTraits | 
| 74 | – | template<typename T> | 
| 75 | – | struct MPITypeTraits; | 
| 110 |  |  | 
| 111 | + | void ForceMatrixDecomposition::distributeData()  { | 
| 112 |  | #ifdef IS_MPI | 
| 113 | < | template<> | 
| 114 | < | struct MPITypeTraits<RealType> { | 
| 115 | < | static const MPI::Datatype datatype; | 
| 116 | < | }; | 
| 117 | < | const MPI_Datatype MPITypeTraits<RealType>::datatype = MY_MPI_REAL; | 
| 113 | > | Snapshot* snap = sman_->getCurrentSnapshot(); | 
| 114 | > |  | 
| 115 | > | // gather up the atomic positions | 
| 116 | > | AtomCommVectorRow->gather(snap->atomData.position, | 
| 117 | > | snap->atomIData.position); | 
| 118 | > | AtomCommVectorColumn->gather(snap->atomData.position, | 
| 119 | > | snap->atomJData.position); | 
| 120 | > |  | 
| 121 | > | // gather up the cutoff group positions | 
| 122 | > | cgCommVectorRow->gather(snap->cgData.position, | 
| 123 | > | snap->cgIData.position); | 
| 124 | > | cgCommVectorColumn->gather(snap->cgData.position, | 
| 125 | > | snap->cgJData.position); | 
| 126 | > |  | 
| 127 | > | // if needed, gather the atomic rotation matrices | 
| 128 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslAmat) { | 
| 129 | > | AtomCommMatrixRow->gather(snap->atomData.aMat, | 
| 130 | > | snap->atomIData.aMat); | 
| 131 | > | AtomCommMatrixColumn->gather(snap->atomData.aMat, | 
| 132 | > | snap->atomJData.aMat); | 
| 133 | > | } | 
| 134 | > |  | 
| 135 | > | // if needed, gather the atomic electrostatic frames | 
| 136 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslElectroFrame) { | 
| 137 | > | AtomCommMatrixRow->gather(snap->atomData.electroFrame, | 
| 138 | > | snap->atomIData.electroFrame); | 
| 139 | > | AtomCommMatrixColumn->gather(snap->atomData.electroFrame, | 
| 140 | > | snap->atomJData.electroFrame); | 
| 141 | > | } | 
| 142 | > | #endif | 
| 143 | > | } | 
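Each Communicator<Row,*>::gather / Communicator<Column,*>::gather call above replicates a local per-atom array across the processor's row or column. The removed ForceDecomposition::gather stub elsewhere in this changeset points at MPI's Allgatherv as the underlying primitive; here is a minimal standalone sketch of that pattern for a plain double array (gatherAlongComm and its arguments are illustrative names, not the OpenMD API).

    #include <mpi.h>
    #include <vector>

    // Sketch: gather variable-length per-rank arrays into one array that is
    // replicated on every rank of the given (row or column) communicator.
    void gatherAlongComm(MPI_Comm comm, std::vector<double>& local,
                         std::vector<double>& gathered) {
      int nProcs;
      MPI_Comm_size(comm, &nProcs);

      // ranks may own different numbers of atoms, so exchange the counts first
      int myCount = (int) local.size();
      std::vector<int> counts(nProcs), displs(nProcs);
      MPI_Allgather(&myCount, 1, MPI_INT, &counts[0], 1, MPI_INT, comm);

      int total = 0;
      for (int i = 0; i < nProcs; ++i) { displs[i] = total; total += counts[i]; }

      gathered.resize(total);
      MPI_Allgatherv(&local[0], myCount, MPI_DOUBLE,
                     &gathered[0], &counts[0], &displs[0], MPI_DOUBLE, comm);
    }

The Vector3d and Mat3x3d gathers above presumably follow the same pattern, just with three or nine doubles per atom instead of one.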
| 144 | > |  | 
| 145 | > | void ForceMatrixDecomposition::collectIntermediateData() { | 
| 146 | > | #ifdef IS_MPI | 
| 147 | > | Snapshot* snap = sman_->getCurrentSnapshot(); | 
| 148 | > |  | 
| 149 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslDensity) { | 
| 150 |  |  | 
| 151 | < | template<> | 
| 152 | < | struct MPITypeTraits<int> { | 
| 86 | < | static const MPI::Datatype datatype; | 
| 87 | < | }; | 
| 88 | < | const MPI::Datatype MPITypeTraits<int>::datatype = MPI_INT; | 
| 89 | < | #endif | 
| 151 | > | AtomCommRealRow->scatter(snap->atomIData.density, | 
| 152 | > | snap->atomData.density); | 
| 153 |  |  | 
| 154 | < | /** | 
| 155 | < | * Constructor for ForceDecomposition Parallel Decomposition Method | 
| 156 | < | * Will try to construct a symmetric grid of processors. Ideally, the | 
| 157 | < | * number of processors will be a square ex: 4, 9, 16, 25. | 
| 158 | < | * | 
| 159 | < | */ | 
| 97 | < |  | 
| 98 | < | ForceDecomposition::ForceDecomposition() { | 
| 99 | < |  | 
| 100 | < | #ifdef IS_MPI | 
| 101 | < | int nProcs = MPI::COMM_WORLD.Get_size(); | 
| 102 | < | int worldRank = MPI::COMM_WORLD.Get_rank(); | 
| 154 | > | int n = snap->atomData.density.size(); | 
| 155 | > | std::vector<RealType> rho_tmp(n, 0.0); | 
| 156 | > | AtomCommRealColumn->scatter(snap->atomJData.density, rho_tmp); | 
| 157 | > | for (int i = 0; i < n; i++) | 
| 158 | > | snap->atomData.density[i] += rho_tmp[i]; | 
| 159 | > | } | 
| 160 |  | #endif | 
| 161 | < |  | 
| 162 | < | // First time through, construct column stride. | 
| 163 | < | if (communicators.size() == 0) | 
| 164 | < | { | 
| 165 | < | int nColumnsMax = (int) round(sqrt((float) nProcs)); | 
| 166 | < | for (int i = 0; i < nProcs; ++i) | 
| 167 | < | { | 
| 168 | < | if (nProcs%i==0) nColumns=i; | 
| 161 | > | } | 
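collectIntermediateData shows the reverse traffic: per-atom contributions computed against the row and column arrays are scattered back and summed into the local density array. The removed ForceDecomposition::scatter stub in this changeset calls Reduce_scatter, which is the natural MPI primitive for this step; a standalone sketch with illustrative names, where counts[r] is the number of atoms owned by rank r of the communicator:

    #include <mpi.h>
    #include <vector>

    // Sketch: every rank holds a contribution for every atom in the row (or
    // column); after the reduce-scatter each rank owns only the summed values
    // for its own atoms.
    void reduceScatterAlongComm(MPI_Comm comm, std::vector<double>& contrib,
                                std::vector<int>& counts,
                                std::vector<double>& localSum) {
      int rank;
      MPI_Comm_rank(comm, &rank);

      localSum.assign(counts[rank], 0.0);
      MPI_Reduce_scatter(&contrib[0], &localSum[0], &counts[0],
                         MPI_DOUBLE, MPI_SUM, comm);
    }

collectData() below applies the same idea twice, once along the row and once along the column, adding both partial results into snap->atomData.force (and torque) element by element.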
| 162 | > |  | 
| 163 | > | void ForceMatrixDecomposition::distributeIntermediateData() { | 
| 164 | > | #ifdef IS_MPI | 
| 165 | > | Snapshot* snap = sman_->getCurrentSnapshot(); | 
| 166 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslFunctional) { | 
| 167 | > | AtomCommRealRow->gather(snap->atomData.functional, | 
| 168 | > | snap->atomIData.functional); | 
| 169 | > | AtomCommRealColumn->gather(snap->atomData.functional, | 
| 170 | > | snap->atomJData.functional); | 
| 171 |  | } | 
| 172 | < |  | 
| 173 | < | int nRows = nProcs/nColumns; | 
| 174 | < | myRank_ = (int) worldRank%nColumns; | 
| 172 | > |  | 
| 173 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslFunctionalDerivative) { | 
| 174 | > | AtomCommRealRow->gather(snap->atomData.functionalDerivative, | 
| 175 | > | snap->atomIData.functionalDerivative); | 
| 176 | > | AtomCommRealColumn->gather(snap->atomData.functionalDerivative, | 
| 177 | > | snap->atomJData.functionalDerivative); | 
| 178 | > | } | 
| 179 | > | #endif | 
| 180 |  | } | 
| 117 | – | else | 
| 118 | – | { | 
| 119 | – | myRank_ = myRank/nColumns; | 
| 120 | – | } | 
| 121 | – | MPI::Comm newComm = MPI::COMM_WORLD.Split(myRank_,0); | 
| 181 |  |  | 
| 123 | – | isColumn_ = false; | 
| 182 |  |  | 
| 183 | < | } | 
| 183 | > | void ForceMatrixDecomposition::collectData() { | 
| 184 | > | #ifdef IS_MPI | 
| 185 | > | Snapshot* snap = sman_->getCurrentSnapshot(); | 
| 186 | > |  | 
| 187 | > | int n = snap->atomData.force.size(); | 
| 188 | > | vector<Vector3d> frc_tmp(n, V3Zero); | 
| 189 | > |  | 
| 190 | > | AtomCommVectorRow->scatter(snap->atomIData.force, frc_tmp); | 
| 191 | > | for (int i = 0; i < n; i++) { | 
| 192 | > | snap->atomData.force[i] += frc_tmp[i]; | 
| 193 | > | frc_tmp[i] = 0.0; | 
| 194 | > | } | 
| 195 | > |  | 
| 196 | > | AtomCommVectorColumn->scatter(snap->atomJData.force, frc_tmp); | 
| 197 | > | for (int i = 0; i < n; i++) | 
| 198 | > | snap->atomData.force[i] += frc_tmp[i]; | 
| 199 | > |  | 
| 200 | > |  | 
| 201 | > | if (snap->atomData.getStorageLayout() & DataStorage::dslTorque) { | 
| 202 |  |  | 
| 203 | < | ForceDecomposition::gather(sendbuf, receivebuf){ | 
| 204 | < | communicators(myIndex_).Allgatherv(); | 
| 129 | < | } | 
| 203 | > | int nt = snap->atomData.force.size(); | 
| 204 | > | vector<Vector3d> trq_tmp(nt, V3Zero); | 
| 205 |  |  | 
| 206 | + | AtomCommVectorRow->scatter(snap->atomIData.torque, trq_tmp); | 
| 207 | + | for (int i = 0; i < n; i++) { | 
| 208 | + | snap->atomData.torque[i] += trq_tmp[i]; | 
| 209 | + | trq_tmp[i] = 0.0; | 
| 210 | + | } | 
| 211 | + |  | 
| 212 | + | AtomCommVectorColumn->scatter(snap->atomJData.torque, trq_tmp); | 
| 213 | + | for (int i = 0; i < n; i++) | 
| 214 | + | snap->atomData.torque[i] += trq_tmp[i]; | 
| 215 | + | } | 
| 216 | + |  | 
| 217 | + | int nLocal = snap->getNumberOfAtoms(); | 
| 218 |  |  | 
| 219 | < |  | 
| 220 | < | ForceDecomposition::scatter(sbuffer, rbuffer){ | 
| 221 | < | communicators(myIndex_).Reduce_scatter(sbuffer, recevbuf. recvcounts, MPI::DOUBLE, MPI::SUM); | 
| 222 | < | } | 
| 223 | < |  | 
| 224 | < |  | 
| 219 | > | vector<vector<RealType> > pot_temp(N_INTERACTION_FAMILIES, | 
| 220 | > | vector<RealType> (nLocal, 0.0)); | 
| 221 | > |  | 
| 222 | > | for (int i = 0; i < N_INTERACTION_FAMILIES; i++) { | 
| 223 | > | AtomCommRealRow->scatter(pot_row[i], pot_temp[i]); | 
| 224 | > | for (int ii = 0;  ii < pot_temp[i].size(); ii++ ) { | 
| 225 | > | pot_local[i] += pot_temp[i][ii]; | 
| 226 | > | } | 
| 227 | > | } | 
| 228 | > | #endif | 
| 229 | > | } | 
| 230 | > |  | 
| 231 | > | } //end namespace OpenMD |
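One step is only hinted at here: the per-family sums accumulated into pot_local at the end of collectData() are still per-processor values, and pot_row and pot_local are declared as locals inside distributeInitialData() above, so unless the header also declares members with these names they are not yet visible to collectData(). Assuming they do become shared state, the remaining work is a global reduction of pot_local over all processors; the sketch below is an assumption about that final step (reducePotentials and the MPI_Allreduce call are not code from this changeset).

    #include <mpi.h>
    #include <vector>

    // Sketch: combine each processor's per-interaction-family potential sums
    // into totals that every rank sees.
    void reducePotentials(std::vector<double>& pot_local,
                          std::vector<double>& pot_total) {
      pot_total.assign(pot_local.size(), 0.0);
      MPI_Allreduce(&pot_local[0], &pot_total[0], (int) pot_local.size(),
                    MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);
    }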