root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 419
Committed: Thu Mar 27 15:07:29 2003 UTC by gezelter
File size: 8648 byte(s)
Log Message:
fixes for local->global atom numbering in MPI
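
The local->global numbering in question is carried by two arrays:
AtomToProcMap (built on node 0 and broadcast to everyone) maps each
global atom index to its owning processor, while globalIndex (rebuilt
on each processor) maps a local atom slot back to the global index. A
minimal sketch of the relationship, using abbreviated names from the
listing below:

    local_index = 0;
    for (i = 0; i < nAtomsGlobal; i++)
      if (AtomToProcMap[i] == myNode)
        globalIndex[local_index++] = i;   // local slot -> global id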

File Contents

#ifdef IS_MPI

#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>
#include <mpi++.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

#define BASE_SEED 123456789

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index, index;
  int smallDiff, bigDiff;
  int baseSeed = BASE_SEED;

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
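  // a = nMol/nAtoms is the reciprocal of the average number of atoms
  // per molecule; it sets how fast the acceptance probability used in
  // the assignment loop below decays once a molecule would push a
  // processor past its atom target.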

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
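    // nTarget is the per-processor atom budget: the global atom
    // count divided evenly among the processors, rounded to the
    // nearest integer.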

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];

        // ...and how many would it have if it took this molecule?

        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it. (This bailout is checked before the
        // capacity test below, so the loop cannot spin forever once
        // every processor has filled up to nTarget.)

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            // record the mapping first, then advance the global
            // atom counter:
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If the processor already has too many atoms, just skip this
        // processor and try again.

        if (old_atoms >= nTarget) continue;

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is where old_atoms < nTarget, but
        // new_atoms > nTarget. We want to accept this with some
        // probability that dies off the farther we are from nTarget:

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = 1 / (average atoms per molecule)
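        //
        // e.g. for a system of 3-atom molecules, a = 1/3: overshooting
        // nTarget by x = 2 atoms is accepted with probability
        // exp(-2/3) ~ 0.51, while x = 10 survives only with
        // probability exp(-10/3) ~ 0.04.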

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (exp(- a * x) > y) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

  }

  // Spray out this nonsense to all other processors. The broadcasts
  // are collective, so node 0 sends while every other processor makes
  // the same calls and just listens for its marching orders:

  MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                        MPI::INT, 0);

  MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                        MPI::INT, 0);

  MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                        MPI::INT, 0);

  MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                        MPI::INT, 0);


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

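  // Allreduce sums the local counts across COMM_WORLD and returns the
  // result to every rank, so each processor can check the totals for
  // itself: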
  MPI::COMM_WORLD.Allreduce(&nmol_local, &nmol_global, 1, MPI::INT, MPI::SUM);
  MPI::COMM_WORLD.Allreduce(&natoms_local, &natoms_global, 1, MPI::INT, MPI::SUM);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      // store this atom's global index in the next local slot, then
      // advance the local counter:
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}


void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  for(i = 0; i < mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();

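  // Hand the freshly built local->global index table (plus the global
  // atom count) down to the fortran side; setFsimParallel is expected
  // to set isError nonzero if fortran doesn't like what it was given.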
  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI