root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 1203
Committed: Thu May 27 18:59:17 2004 UTC by gezelter
File size: 11925 bytes
Log Message:
Cutoff group changes under MPI

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];
  GroupToProcMap = new int[entryPlug->ngroup];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;
  delete[] GroupToProcMap;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

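// divideLabor maps every molecule (and its atoms and cutoff groups) onto a
// processor.  Node 0 makes the assignments by drawing processors at random:
// a molecule is accepted outright if it keeps the processor at or below the
// target atom count, and accepted with probability exp(-a*x) if it would
// overshoot the target by x atoms; after 100 failed tries it is simply given
// to the last processor drawn.  The resulting maps are then broadcast to all
// other nodes, sanity-checked, and used to build the local <-> global index
// tables.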
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nBondsGlobal = entryPlug->n_bonds;
  parallelData->nBendsGlobal = entryPlug->n_bends;
  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

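  // Penalty constant for the overload acceptance test below:
  // a = penalty (3.0) / (average number of atoms per molecule)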
  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }
  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
    // default to an error condition:
    GroupToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
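    // Only node 0 does the assignment.  nTarget is the ideal number of
    // atoms per processor: total atoms / number of processors, rounded.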
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;
    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

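        // Count the cutoff groups this molecule would add.  Atoms that are
        // not members of any explicitly defined cutoff group each count as
        // a group of their own, so the molecule contributes
        // (atoms outside groups) + (explicitly defined groups).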
        old_groups = GroupsPerProc[which_proc];
        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
        nAtomsInGroups = 0;
        for (j = 0; j < ncutoff_groups; j++) {
          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
          nAtomsInGroups += cg->getNMembers();
        }
        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
        new_groups = old_groups + add_groups;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget.  We want
        // to accept this with some probability that dies off the farther
        // we are from nTarget:
        //
        //   roughly: x = new_atoms - nTarget
        //            Pacc(x) = exp(- a * x)
        //   where a = penalty / (average atoms per molecule)
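        //
        // For example, with the penalty of 3.0 used above, overshooting the
        // target by one average-sized molecule gives a*x = 3, so the move is
        // accepted with probability exp(-3), roughly 5%.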

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      natoms_local++;
    }
  }

  ngroups_local = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      ngroups_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ngroups_local, &ngroups_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  if( ngroups_global != entryPlug->ngroup ){
    sprintf( painCave.errMsg,
             "The sum of all ngroups_local, %d, did not equal the "
             "total number of cutoffGroups, %d.\n",
             ngroups_global, entryPlug->ngroup );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

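  // Build the local <-> global index tables for everything assigned to this
  // node: globalAtomIndex maps a local atom index to its global index, and
  // globalToLocalAtom does the reverse (-1 marks atoms that live elsewhere).
  // The same pattern repeats below for cutoff groups and molecules.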
  globalAtomIndex.resize(parallelData->nAtomsLocal);
  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    } else {
      globalToLocalAtom[i] = -1;
    }
  }

  globalGroupIndex.resize(parallelData->nGroupsLocal);
  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      globalGroupIndex[local_index] = i;
      globalToLocalGroup[i] = local_index;
      local_index++;
    } else {
      globalToLocalGroup[i] = -1;
    }
  }

  globalMolIndex.resize(parallelData->nMolLocal);
  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    } else {
      globalToLocalMol[i] = -1;
    }
  }

}

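// mpiRefresh hands the current parallel decomposition back to the fortran
// side: it rebuilds the (1-based) list of global atom indices for the atoms
// on this node and passes it down through setFsimParallel.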
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalAtomIndex = new int[parallelData->nAtomsLocal];

  // Fortran indexing starts at 1, so the global indices have to be shifted
  // by 1 to keep the two languages in agreement.

  for(i = 0; i < parallelData->nAtomsLocal; i++)
    globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex() + 1;

  isError = 0;
  setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalAtomIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi