root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 1198
Committed: Thu May 27 00:48:12 2004 UTC by tim
File size: 8818 byte(s)
Log Message:
in the process of fixing the MPI version of cutoff groups

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

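// divideLabor() hands out molecules (and their atoms) to processors.
// Node 0 picks a processor at random for each molecule; a pick that would
// push that processor past the per-node target atom count (nTarget) is
// accepted with probability exp(-a*x), where x is the overshoot and a is
// set from the average number of atoms per molecule.  The resulting
// molecule->processor and atom->processor maps are then broadcast to all
// nodes, sanity-checked, and used to build the global/local index tables.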
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;
  int baseSeed = entryPlug->getSeed();

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->nProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;
  mpiPlug->nGroupsGlobal = entryPlug->ngroup;

  myRandom = new randomSPRNG( baseSeed );

  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator   = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i = 0; i < nComponents; i++) {
      for (j = 0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random
        which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);

        // How many atoms does this processor have?
        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget.  We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget.

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

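        // For example, with 3-atom (water-like) molecules there are on
        // average 3 atoms per molecule, so a = 3.0 * nMol / nAtoms = 1.0:
        // an overshoot of 1 atom survives with probability exp(-1) (~37%),
        // an overshoot of 3 atoms with exp(-3) (~5%).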
      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->nMolLocal = nmol_local;
  mpiPlug->nAtomsLocal = natoms_local;

  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalMolIndex.resize(mpiPlug->nMolLocal);
  globalToLocalMol.resize(mpiPlug->nMolGlobal);

  local_index = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}

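
// mpiRefresh() re-sends the local atoms' global indices (shifted to
// Fortran's 1-based numbering) down to the Fortran side of the code.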
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->nAtomsLocal];

  // Fortran indexing needs to be increased by 1 in order to keep the
  // two languages from barfing at each other.

  for(i = 0; i < mpiPlug->nAtomsLocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex() + 1;

  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi
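
For orientation, here is a minimal, hypothetical driver sketch (not part of this file or of OOPSE) showing how the class appears intended to be used; it assumes MPI_Init has already run, worldRank has been set, and entryPlug points to a fully parsed SimInfo. The variable name parallelSim is made up for illustration.

    #ifdef IS_MPI
      mpiSimulation* parallelSim = new mpiSimulation( entryPlug ); // also sets the global mpiSim
      parallelSim->divideLabor();  // node 0 assigns molecules/atoms, then broadcasts the maps
      // ... local atoms are presumably created from the maps here ...
      parallelSim->mpiRefresh();   // hand the 1-based global atom indices to the Fortran side
    #endif // IS_MPI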