root/group/trunk/OOPSE-1.0/libmdtools/mpiSimulation.cpp
Revision: 1334
Committed: Fri Jul 16 18:58:03 2004 UTC by gezelter
File size: 12444 byte(s)
Log Message:
Initial import of OOPSE-1.0 source tree

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];
  GroupToProcMap = new int[entryPlug->ngroup];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;
  delete[] GroupToProcMap;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

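// divideLabor: processor 0 assigns each molecule (and, with it, that
// molecule's atoms and cutoff groups) to a processor, broadcasts the
// resulting maps to everyone, and then every node builds its own
// local <-> global index tables from those maps.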
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nBondsGlobal = entryPlug->n_bonds;
  parallelData->nBendsGlobal = entryPlug->n_bends;
  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
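  // Note: with this choice, a = 3.0 / (average atoms per molecule), i.e. the
  // "penalty" factor in the acceptance probability used below is 3.0.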

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }
  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
    // default to an error condition:
    GroupToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
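    // nTarget is the ideal atom count per processor: nAtomsGlobal divided by
    // nProcessors, rounded to the nearest integer.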

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;
    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        old_groups = GroupsPerProc[which_proc];
        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
        nAtomsInGroups = 0;
        for (j = 0; j < ncutoff_groups; j++) {
          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
          nAtomsInGroups += cg->getNMembers();
        }
        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
        new_groups = old_groups + add_groups;
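        // add_groups counts one group per declared cutoff group, plus one
        // group for every atom in this molecule that is not a member of any
        // cutoff group.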

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget.

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

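        // Worked example (illustrative numbers only): if molecules average
        // 10 atoms each, then a = 3.0/10 = 0.3, and a processor that would
        // end up 10 atoms over nTarget accepts the molecule with probability
        // exp(-0.3 * 10) = exp(-3), i.e. roughly 5%.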
        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    //std::cerr << "node 0 mol2proc = \n";
    //for (i = 0; i < parallelData->nMolGlobal; i++)
    //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";

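    // MPI_Bcast is a collective call: every rank in MPI_COMM_WORLD must make
    // a matching call with the same root (0), which is why the same sequence
    // of broadcasts appears again in the else branch below.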
    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      natoms_local++;
    }
  }

  ngroups_local = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      ngroups_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ngroups_local, &ngroups_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  if( ngroups_global != entryPlug->ngroup ){
    sprintf( painCave.errMsg,
             "The sum of all ngroups_local, %d, did not equal the "
             "total number of cutoffGroups, %d.\n",
             ngroups_global, entryPlug->ngroup );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

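  // Build the local <-> global index maps. globalAtomIndex[local] gives the
  // global index of each locally-owned atom, while globalToLocalAtom[global]
  // gives its local index (or -1 if the atom lives on another processor).
  // The group and molecule maps below follow the same convention.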
  globalAtomIndex.resize(parallelData->nAtomsLocal);
  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalGroupIndex.resize(parallelData->nGroupsLocal);
  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      globalGroupIndex[local_index] = i;
      globalToLocalGroup[i] = local_index;
      local_index++;
    }
    else
      globalToLocalGroup[i] = -1;
  }

  globalMolIndex.resize(parallelData->nMolLocal);
  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}

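// mpiRefresh: hand the current (1-based) local-to-global atom and group
// index tables, along with parallelData, to the fortran side through
// setFsimParallel.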
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the 2
  // languages to not barf

  for(i = 0; i < parallelData->nAtomsLocal; i++)
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for(i = 0; i < parallelData->nGroupsLocal; i++)
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;
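  // (e.g., the locally-owned atom whose global C-side index is 0 is referred
  // to as atom 1 on the fortran side.)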

  isError = 0;

  setFsimParallel( parallelData,
                   &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
                   &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
                   &isError );

  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] localToGlobalGroupIndex;
  delete[] localToGlobalAtomIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}

#endif // IS_MPI
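
For context, the following is a minimal standalone sketch of the randomized
load-balancing rule used in divideLabor above. It is not part of the OOPSE
source: the numbers are illustrative, and std::mt19937 stands in for
randomSPRNG.

#include <cmath>
#include <cstdio>
#include <random>
#include <vector>

int main() {
  const int nProcessors = 4;
  const int atomsPerMol = 10;   // pretend every molecule has 10 atoms
  const int nMol = 100;
  const int nAtoms = nMol * atomsPerMol;
  const int nTarget = (int)((double)nAtoms / (double)nProcessors + 0.5);
  const double a = 3.0 * (double)nMol / (double)nAtoms;

  std::mt19937 gen(42);         // stand-in for randomSPRNG
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  std::vector<int> atomsPerProc(nProcessors, 0);
  std::vector<int> molToProc(nMol, -1);

  for (int i = 0; i < nMol; i++) {
    for (int loops = 1; molToProc[i] < 0; loops++) {
      int which = (int)(uniform(gen) * nProcessors);  // pick a processor at random
      int newAtoms = atomsPerProc[which] + atomsPerMol;
      double x = (double)(newAtoms - nTarget);
      // accept if we stay at or under the target, with probability exp(-a*x)
      // if we overshoot, or unconditionally after 100 failed attempts:
      if (newAtoms <= nTarget || uniform(gen) < std::exp(-a * x) || loops > 100) {
        molToProc[i] = which;
        atomsPerProc[which] += atomsPerMol;
      }
    }
  }

  for (int p = 0; p < nProcessors; p++)
    std::printf("processor %d: %d atoms (target %d)\n", p, atomsPerProc[p], nTarget);
  return 0;
}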