root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 432
Committed: Thu Mar 27 23:33:40 2003 UTC by chuckv
File size: 8714 byte(s)
Log Message:
MPI buggy, fixed mpiRefresh issue.

File Contents

#ifdef IS_MPI
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>
#include <mpi++.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

#define BASE_SEED 123456789

mpiSimulation* mpiSim;

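// Constructor: cache the SimInfo handle, record the communicator size and
// this node's rank, allocate the molecule/atom-to-processor maps, and
// register this object with the fortran wrapper layer.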
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

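// divideLabor: node 0 assigns each molecule to a randomly chosen processor,
// rejecting picks that would push a processor past the target atom count
// (with an exponentially decaying acceptance for small overshoots), then
// broadcasts the assignment maps to all other nodes.  Each node returns the
// array of global atom indices it owns.  Typically called once during setup.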
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index, index;
  int smallDiff, bigDiff;
  int baseSeed = BASE_SEED;

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

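  // a is 1 / (average atoms per molecule); it sets the decay rate of the
  // acceptance probability Pacc(x) = exp(-a * x) used below.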
  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If the processor already had too many atoms, just skip this
        // processor and try again.

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        if (old_atoms >= nTarget) continue;

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is where old_atoms < nTarget, but
        // new_atoms > nTarget.  We want to accept this with some
        // probability that dies off the farther we are from nTarget

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = 1 / (average atoms per molecule)

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (exp(- a * x) > y) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                          MPI_INT, 0);
  } else {

    // Listen to your marching orders from processor 0:

    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                          MPI_INT, 0);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";

  MPI::COMM_WORLD.Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM);
  MPI::COMM_WORLD.Allreduce(&natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

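  // Build the list of global atom indices owned by this node: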
  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}

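// mpiRefresh: rebuild the local-to-global atom index table and pass the
// refreshed parallel layout to the fortran side of the code.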
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  for(i = 0; i < mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();

  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi