root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 1217
Committed: Tue Jun 1 21:45:22 2004 UTC by gezelter
File size: 12444 byte(s)
Log Message:
Bug fix (fixes of skipList and neighbor list under MPI)

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

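// The constructor caches the SimInfo handle, sizes the parallel bookkeeping
// arrays (molecule, atom, and cutoff-group -> processor maps), and registers
// this object with the Fortran layer via wrapMeSimParallel.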
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];
  GroupToProcMap = new int[entryPlug->ngroup];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;
  delete[] GroupToProcMap;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

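// divideLabor assigns every molecule (and its atoms and cutoff groups) to a
// processor. Node 0 picks processors at random, accepting any choice that
// keeps a processor at or below the target atom count and accepting overloads
// with a probability that decays exponentially; the resulting maps are then
// broadcast to all other nodes and checked for consistency.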
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nBondsGlobal = entryPlug->n_bonds;
  parallelData->nBendsGlobal = entryPlug->n_bends;
  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

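  // Acceptance parameter for overloaded processors: a penalty of 3.0 divided
  // by the average number of atoms per molecule (see the Pacc comment in the
  // assignment loop below).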
  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }
  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
    // default to an error condition:
    GroupToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;
    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        old_groups = GroupsPerProc[which_proc];
        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
        nAtomsInGroups = 0;
        for (j = 0; j < ncutoff_groups; j++) {
          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
          nAtomsInGroups += cg->getNMembers();
        }
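        // Every atom that is not a member of a defined cutoff group counts
        // as its own group, so this molecule contributes
        // (atoms - atoms in cutoff groups) + (number of cutoff groups) groups.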
        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
        new_groups = old_groups + add_groups;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget
        //
        // roughly: x       = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
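        //
        // For example, with 3-atom molecules the penalty of 3.0 gives a = 1.0,
        // so landing 5 atoms over nTarget is accepted with probability
        // exp(-5) ~ 0.007.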

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    //std::cerr << "node 0 mol2proc = \n";
    //for (i = 0; i < parallelData->nMolGlobal; i++)
    //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      natoms_local++;
    }
  }

  ngroups_local = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      ngroups_local++;
    }
  }

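  // Sum the local counts over all processors; every node should recover the
  // global molecule, atom, and cutoff-group totals.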
  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ngroups_local, &ngroups_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  if( ngroups_global != entryPlug->ngroup ){
    sprintf( painCave.errMsg,
             "The sum of all ngroups_local, %d, did not equal the "
             "total number of cutoffGroups, %d.\n",
             ngroups_global, entryPlug->ngroup );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

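  // Build the local <-> global index maps for atoms, cutoff groups, and
  // molecules; entries that do not live on this node map to -1.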
  globalAtomIndex.resize(parallelData->nAtomsLocal);
  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalGroupIndex.resize(parallelData->nGroupsLocal);
  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      globalGroupIndex[local_index] = i;
      globalToLocalGroup[i] = local_index;
      local_index++;
    }
    else
      globalToLocalGroup[i] = -1;
  }

  globalMolIndex.resize(parallelData->nMolLocal);
  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}


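// mpiRefresh pushes the current parallel decomposition to the Fortran side,
// converting the local atom and group indices to Fortran's 1-based convention.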
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the two
  // languages to agree.

  for(i = 0; i < parallelData->nAtomsLocal; i++)
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for(i = 0; i < parallelData->nGroupsLocal; i++)
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;

  isError = 0;

  setFsimParallel( parallelData,
                   &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
                   &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
                   &isError );

  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] localToGlobalGroupIndex;
  delete[] localToGlobalAtomIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI