root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp

Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 419 by gezelter, Thu Mar 27 15:07:29 2003 UTC vs.
Revision 1203 by gezelter, Thu May 27 18:59:17 2004 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2 <
3 < #include <cstdlib>
4 < #include <cstring>
5 < #include <cmath>
2 > #include <iostream>
3 > #include <stdlib.h>
4 > #include <string.h>
5 > #include <math.h>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
14 #define BASE_SEED 123456789
15
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
16   {
17    entryPlug = the_entryPlug;
18 <  mpiPlug = new mpiSimData;
18 >  parallelData = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
21 <  mpiPlug->myNode = worldRank;
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
21 >  parallelData->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
24    MolComponentType = new int[entryPlug->n_mol];
25    AtomToProcMap = new int[entryPlug->n_atoms];
26 +  GroupToProcMap = new int[entryPlug->ngroup];
27  
28    mpiSim = this;
29    wrapMeSimParallel( this );
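
The first hunk drops the MPI C++ bindings (mpi++.h, MPI::COMM_WORLD.Get_size()) in favor of the plain C API. A minimal, self-contained sketch of the calling convention the new revision adopts (illustrative code, not part of OOPSE; it assumes only a working MPI installation):

#include <stdio.h>
#include <mpi.h>

int main(int argc, char** argv) {
  int nProcessors, myNode;

  MPI_Init(&argc, &argv);
  // The C API returns values through output pointers rather than as
  // return values of MPI::COMM_WORLD methods.
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
  MPI_Comm_rank(MPI_COMM_WORLD, &myNode);
  printf("node %d of %d\n", myNode, nProcessors);
  MPI_Finalize();
  return 0;
}
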
# Line 37 | Line 35 | mpiSimulation::~mpiSimulation(){
35    delete[] MolToProcMap;
36    delete[] MolComponentType;
37    delete[] AtomToProcMap;
38 +  delete[] GroupToProcMap;
39  
40 <  delete mpiPlug;
40 >  delete parallelData;
41    // perhaps we should let fortran know the party is over.
42    
43   }
44  
45 < int* mpiSimulation::divideLabor( void ){
45 > void mpiSimulation::divideLabor( ){
46  
48  int* globalIndex;
49
47    int nComponents;
48    MoleculeStamp** compStamps;
49    randomSPRNG *myRandom;
50    int* componentsNmol;
51    int* AtomsPerProc;
52 +  int* GroupsPerProc;
53  
54    double numerator;
55    double denominator;
56    double precast;
57    double x, y, a;
58    int old_atoms, add_atoms, new_atoms;
59 +  int old_groups, add_groups, new_groups;
60  
61    int nTarget;
62 <  int molIndex, atomIndex, compIndex, compStart;
62 >  int molIndex, atomIndex, groupIndex;
63    int done;
64 <  int nLocal, molLocal;
65 <  int i, j, loops, which_proc, nmol_local, natoms_local;
66 <  int nmol_global, natoms_global;
67 <  int local_index, index;
68 <  int smallDiff, bigDiff;
69 <  int baseSeed = BASE_SEED;
64 >  int i, j, loops, which_proc;
65 >  int nmol_global, nmol_local;
66 >  int ngroups_global, ngroups_local;
67 >  int natoms_global, natoms_local;
68 >  int ncutoff_groups, nAtomsInGroups;
69 >  int local_index;
70 >  int baseSeed = entryPlug->getSeed();
71 >  CutoffGroupStamp* cg;
72  
72  int testSum;
73
73    nComponents = entryPlug->nComponents;
74    compStamps = entryPlug->compStamps;
75    componentsNmol = entryPlug->componentsNmol;
76 <  AtomsPerProc = new int[mpiPlug->numberProcessors];
76 >  AtomsPerProc = new int[parallelData->nProcessors];
77 >  GroupsPerProc = new int[parallelData->nProcessors];
78    
79 <  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
80 <  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
81 <  mpiPlug->nBendsGlobal = entryPlug->n_bends;
82 <  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
83 <  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
84 <  mpiPlug->nMolGlobal = entryPlug->n_mol;
79 >  parallelData->nAtomsGlobal = entryPlug->n_atoms;
80 >  parallelData->nBondsGlobal = entryPlug->n_bonds;
81 >  parallelData->nBendsGlobal = entryPlug->n_bends;
82 >  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
83 >  parallelData->nSRIGlobal = entryPlug->n_SRI;
84 >  parallelData->nGroupsGlobal = entryPlug->ngroup;
85 >  parallelData->nMolGlobal = entryPlug->n_mol;
86  
87    myRandom = new randomSPRNG( baseSeed );
88  
89 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
89 >  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
90  
91    // Initialize things that we'll send out later:
92 <  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
92 >  for (i = 0; i < parallelData->nProcessors; i++ ) {
93      AtomsPerProc[i] = 0;
94 +    GroupsPerProc[i] = 0;
95    }
96 <  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
96 >  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
97      // default to an error condition:
98      MolToProcMap[i] = -1;
99      MolComponentType[i] = -1;
100    }
101 <  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
101 >  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
102      // default to an error condition:
103      AtomToProcMap[i] = -1;
104    }
105 +  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
106 +    // default to an error condition:
107 +    GroupToProcMap[i] = -1;
108 +  }
109      
110 <  if (mpiPlug->myNode == 0) {
110 >  if (parallelData->myNode == 0) {
111      numerator = (double) entryPlug->n_atoms;
112 <    denominator = (double) mpiPlug->numberProcessors;
112 >    denominator = (double) parallelData->nProcessors;
113      precast = numerator / denominator;
114      nTarget = (int)( precast + 0.5 );
115  
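
Node 0 rounds n_atoms / nProcessors to get nTarget, the ideal atom count per processor, and the new revision sets the acceptance decay constant to a = 3.0 * nMol / nAtoms, i.e. a penalty of 3.0 divided by the average number of atoms per molecule. A small standalone sketch of that arithmetic, with made-up counts chosen purely for illustration:

#include <stdio.h>

int main() {
  // Illustrative counts only; the real values come from SimInfo.
  int nAtomsGlobal = 10000;
  int nMolGlobal   = 2500;
  int nProcessors  = 8;

  // Ideal atoms per processor, rounded to the nearest integer.
  double precast = (double) nAtomsGlobal / (double) nProcessors;
  int nTarget = (int) (precast + 0.5);

  // Decay constant for the acceptance probability used below:
  // a = penalty / (average atoms per molecule), with penalty = 3.0.
  double a = 3.0 * (double) nMolGlobal / (double) nAtomsGlobal;

  printf("nTarget = %d atoms/proc, a = %f\n", nTarget, a);
  return 0;
}
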
# Line 117 | Line 123 | int* mpiSimulation::divideLabor( void ){
123      }
124  
125      atomIndex = 0;
126 +    groupIndex = 0;
127  
128      for (i = 0; i < molIndex; i++ ) {
129  
# Line 128 | Line 135 | int* mpiSimulation::divideLabor( void ){
135          
136          // Pick a processor at random
137  
138 <        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
138 >        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);
139  
140          // How many atoms does this processor have?
141          
142          old_atoms = AtomsPerProc[which_proc];
136
137        // If the processor already had too many atoms, just skip this
138        // processor and try again.
139
140        if (old_atoms >= nTarget) continue;
141
143          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
144          new_atoms = old_atoms + add_atoms;
145 <    
146 <        // If we can add this molecule to this processor without sending
147 <        // it above nTarget, then go ahead and do it:
148 <    
149 <        if (new_atoms <= nTarget) {
150 <          MolToProcMap[i] = which_proc;
151 <          AtomsPerProc[which_proc] += add_atoms;
151 <          for (j = 0 ; j < add_atoms; j++ ) {
152 <            atomIndex++;
153 <            AtomToProcMap[atomIndex] = which_proc;
154 <          }
155 <          done = 1;
156 <          continue;
145 >
146 >        old_groups = GroupsPerProc[which_proc];
147 >        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
148 >        nAtomsInGroups = 0;
149 >        for (j = 0; j < ncutoff_groups; j++) {
150 >          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
151 >          nAtomsInGroups += cg->getNMembers();
152          }
153 +        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;        
154 +        new_groups = old_groups + add_groups;
155  
156          // If we've been through this loop too many times, we need
157          // to just give up and assign the molecule to this processor
# Line 172 | Line 169 | int* mpiSimulation::divideLabor( void ){
169            MolToProcMap[i] = which_proc;
170            AtomsPerProc[which_proc] += add_atoms;
171            for (j = 0 ; j < add_atoms; j++ ) {
172 <            atomIndex++;
173 <            AtomToProcMap[atomIndex] = which_proc;
172 >            AtomToProcMap[atomIndex] = which_proc;
173 >            atomIndex++;
174            }
175 +          GroupsPerProc[which_proc] += add_groups;
176 +          for (j=0; j < add_groups; j++) {
177 +            GroupToProcMap[groupIndex] = which_proc;
178 +            groupIndex++;
179 +          }
180            done = 1;
181            continue;
182          }
183 +    
184 +        // If we can add this molecule to this processor without sending
185 +        // it above nTarget, then go ahead and do it:
186 +    
187 +        if (new_atoms <= nTarget) {
188 +          MolToProcMap[i] = which_proc;
189 +          AtomsPerProc[which_proc] += add_atoms;
190 +          for (j = 0 ; j < add_atoms; j++ ) {
191 +            AtomToProcMap[atomIndex] = which_proc;
192 +            atomIndex++;
193 +          }
194 +          GroupsPerProc[which_proc] += add_groups;
195 +          for (j=0; j < add_groups; j++) {
196 +            GroupToProcMap[groupIndex] = which_proc;
197 +            groupIndex++;
198 +          }
199 +          done = 1;
200 +          continue;
201 +        }
202  
182        // The only situation left is where old_atoms < nTarget, but
183        // new_atoms > nTarget.   We want to accept this with some
184        // probability that dies off the farther we are from nTarget
203  
204 +        // The only situation left is when new_atoms > nTarget.  We
205 +        // want to accept this with some probability that dies off the
206 +        // farther we are from nTarget
207 +
208          // roughly:  x = new_atoms - nTarget
209          //           Pacc(x) = exp(- a * x)
210 <        // where a = 1 / (average atoms per molecule)
210 >        // where a = penalty / (average atoms per molecule)
211  
212          x = (double) (new_atoms - nTarget);
213          y = myRandom->getRandom();
214 <        
215 <        if (exp(- a * x) > y) {
214 >      
215 >        if (y < exp(- a * x)) {
216            MolToProcMap[i] = which_proc;
217            AtomsPerProc[which_proc] += add_atoms;
218            for (j = 0 ; j < add_atoms; j++ ) {
219 <            atomIndex++;
220 <            AtomToProcMap[atomIndex] = which_proc;
219 >            AtomToProcMap[atomIndex] = which_proc;
220 >            atomIndex++;
221 >           }
222 >          GroupsPerProc[which_proc] += add_groups;
223 >          for (j=0; j < add_groups; j++) {
224 >            GroupToProcMap[groupIndex] = which_proc;
225 >            groupIndex++;
226            }
227            done = 1;
228            continue;
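
Across these hunks, a molecule offered to a randomly chosen processor is accepted outright if the processor stays at or below nTarget atoms; otherwise it is accepted with probability Pacc(x) = exp(-a*x), where x = new_atoms - nTarget. The added cutoff-group bookkeeping (add_groups = add_atoms - nAtomsInGroups + ncutoff_groups) counts each declared cutoff group once and every atom outside any group as its own group. Below is a self-contained sketch of the accept/reject step only, using drand48() in place of OOPSE's randomSPRNG and leaving out the give-up-after-many-loops safety valve; the numbers are illustrative:

#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Accept a proposed assignment that would push a processor to newAtoms
// atoms, given the per-processor target and the decay constant a.
int acceptAssignment(int newAtoms, int nTarget, double a) {
  if (newAtoms <= nTarget) return 1;          // always accept at or below target
  double x = (double) (newAtoms - nTarget);   // overshoot above the target
  double y = drand48();                       // uniform random in [0,1)
  return (y < exp(-a * x));                   // accept with Pacc = exp(-a*x)
}

int main() {
  srand48(123456789);
  // Illustrative numbers: target 1250 atoms/proc, decay constant 0.75.
  int nTarget = 1250;
  double a = 0.75;

  for (int overshoot = 0; overshoot <= 5; overshoot++) {
    int accepted = 0, trials = 100000;
    for (int t = 0; t < trials; t++)
      if (acceptAssignment(nTarget + overshoot, nTarget, a)) accepted++;
    printf("overshoot %d: acceptance ~ %.3f (exp(-a*x) = %.3f)\n",
           overshoot, (double) accepted / trials, exp(-a * overshoot));
  }
  return 0;
}

With a = 0.75, a one-atom overshoot is still accepted roughly half the time, so the load balance stays soft rather than strict.
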
# Line 206 | Line 233 | int* mpiSimulation::divideLabor( void ){
233        }
234      }
235  
236 +
237      // Spray out this nonsense to all other processors:
238  
239 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
240 <                          MPI_INT, 0);
239 >    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
240 >              MPI_INT, 0, MPI_COMM_WORLD);
241  
242 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
243 <                          MPI_INT, 0);
242 >    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
243 >              MPI_INT, 0, MPI_COMM_WORLD);
244  
245 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
246 <                          MPI_INT, 0);
245 >    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
246 >              MPI_INT, 0, MPI_COMM_WORLD);
247  
248 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
249 <                          MPI_INT, 0);    
248 >    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
249 >              MPI_INT, 0, MPI_COMM_WORLD);
250 >
251 >    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
252 >              MPI_INT, 0, MPI_COMM_WORLD);    
253 >
254 >    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
255 >              MPI_INT, 0, MPI_COMM_WORLD);    
256    } else {
257  
258      // Listen to your marching orders from processor 0:
259      
260 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
261 <                          MPI_INT, 0);
260 >    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
261 >              MPI_INT, 0, MPI_COMM_WORLD);
262      
263 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
264 <                          MPI_INT, 0);
263 >    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
264 >              MPI_INT, 0, MPI_COMM_WORLD);
265  
266 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
267 <                          MPI_INT, 0);
266 >    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
267 >              MPI_INT, 0, MPI_COMM_WORLD);
268 >
269 >    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
270 >              MPI_INT, 0, MPI_COMM_WORLD);
271      
272 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
273 <                          MPI_INT, 0);
237 <  }
272 >    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
273 >              MPI_INT, 0, MPI_COMM_WORLD);
274  
275 +    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
276 +              MPI_INT, 0, MPI_COMM_WORLD);
277  
278 +
279 +  }
280 +
281    // Let's all check for sanity:
282  
283    nmol_local = 0;
284 <  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
285 <    if (MolToProcMap[i] == mpiPlug->myNode) {
284 >  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
285 >    if (MolToProcMap[i] == parallelData->myNode) {
286        nmol_local++;
287      }
288    }
289  
290    natoms_local = 0;
291 <  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
292 <    if (AtomToProcMap[i] == mpiPlug->myNode) {
291 >  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
292 >    if (AtomToProcMap[i] == parallelData->myNode) {
293        natoms_local++;      
294      }
295    }
296  
297 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
298 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
297 >  ngroups_local = 0;
298 >  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
299 >    if (GroupToProcMap[i] == parallelData->myNode) {
300 >      ngroups_local++;      
301 >    }
302 >  }
303 >
304 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
305 >                MPI_COMM_WORLD);
306 >
307 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
308 >                MPI_SUM, MPI_COMM_WORLD);
309 >
310 >  MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT,
311 >                MPI_SUM, MPI_COMM_WORLD);
312    
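
The broadcast-and-verify pattern above can be exercised on its own: rank 0 builds an assignment map, MPI_Bcast distributes it, and MPI_Allreduce sums the per-rank counts so every node can confirm nothing was dropped. A standalone sketch with an illustrative molecule count (not OOPSE code):

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>

int main(int argc, char** argv) {
  int nProcessors, myNode;
  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
  MPI_Comm_rank(MPI_COMM_WORLD, &myNode);

  const int nMolGlobal = 12;            // illustrative molecule count
  int MolToProcMap[nMolGlobal];

  if (myNode == 0) {                    // rank 0 decides the layout
    for (int i = 0; i < nMolGlobal; i++)
      MolToProcMap[i] = rand() % nProcessors;
  }

  // Everyone receives rank 0's map.
  MPI_Bcast(MolToProcMap, nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);

  // Count my molecules, then check that the global sum matches.
  int nmol_local = 0, nmol_global = 0;
  for (int i = 0; i < nMolGlobal; i++)
    if (MolToProcMap[i] == myNode) nmol_local++;

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  if (myNode == 0 && nmol_global != nMolGlobal)
    printf("sanity check failed: %d != %d\n", nmol_global, nMolGlobal);

  MPI_Finalize();
  return 0;
}
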
313    if( nmol_global != entryPlug->n_mol ){
314      sprintf( painCave.errMsg,
# Line 274 | Line 328 | int* mpiSimulation::divideLabor( void ){
328      simError();
329    }
330  
331 +  if( ngroups_global != entryPlug->ngroup ){
332 +    sprintf( painCave.errMsg,
333 +             "The sum of all ngroups_local, %d, did not equal the "
334 +             "total number of cutoffGroups, %d.\n",
335 +             ngroups_global, entryPlug->ngroup );
336 +    painCave.isFatal = 1;
337 +    simError();
338 +  }
339 +
340    sprintf( checkPointMsg,
341             "Successfully divided the molecules among the processors.\n" );
342    MPIcheckPoint();
343  
344 <  mpiPlug->myNMol = nmol_local;
345 <  mpiPlug->myNlocal = natoms_local;
344 >  parallelData->nMolLocal = nmol_local;
345 >  parallelData->nAtomsLocal = natoms_local;
346 >  parallelData->nGroupsLocal = ngroups_local;
347  
348 <  globalIndex = new int[mpiPlug->myNlocal];
348 >  globalAtomIndex.resize(parallelData->nAtomsLocal);
349 >  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
350    local_index = 0;
351 <  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
352 <    if (AtomToProcMap[i] == mpiPlug->myNode) {
351 >  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
352 >    if (AtomToProcMap[i] == parallelData->myNode) {
353 >      globalAtomIndex[local_index] = i;
354 >
355 >      globalToLocalAtom[i] = local_index;
356        local_index++;
357 <      globalIndex[local_index] = i;
357 >      
358      }
359 +    else
360 +       globalToLocalAtom[i] = -1;
361    }
362 <
363 <  return globalIndex;
362 >
363 >  globalGroupIndex.resize(parallelData->nGroupsLocal);
364 >  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
365 >  local_index = 0;
366 >  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
367 >    if (GroupToProcMap[i] == parallelData->myNode) {
368 >      globalGroupIndex[local_index] = i;
369 >
370 >      globalToLocalGroup[i] = local_index;
371 >      local_index++;
372 >      
373 >    }
374 >    else
375 >       globalToLocalGroup[i] = -1;
376 >  }
377 >
378 >  globalMolIndex.resize(parallelData->nMolLocal);
379 >  globalToLocalMol.resize(parallelData->nMolGlobal);  
380 >  local_index = 0;
381 >  for (i = 0; i < parallelData->nMolGlobal; i++) {
382 >    if (MolToProcMap[i] == parallelData->myNode) {
383 >      globalMolIndex[local_index] = i;
384 >      globalToLocalMol[i] = local_index;
385 >      local_index++;
386 >    }
387 >    else
388 >      globalToLocalMol[i] = -1;
389 >  }
390 >  
391   }
392  
393  
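
divideLabor now keeps both directions of the numbering for atoms, cutoff groups, and molecules: globalAtomIndex maps a local slot to its global index, and globalToLocalAtom maps a global index back to the local slot, or -1 when the atom lives on another node. A minimal standalone sketch of that two-way bookkeeping for atoms, with a made-up AtomToProcMap:

#include <stdio.h>
#include <vector>

int main() {
  // Illustrative input: 10 atoms spread over 2 processors, viewed from node 0.
  int myNode = 0;
  std::vector<int> AtomToProcMap = {0, 1, 0, 0, 1, 1, 0, 1, 0, 1};

  std::vector<int> globalAtomIndex;                             // local -> global
  std::vector<int> globalToLocalAtom(AtomToProcMap.size(), -1); // global -> local

  for (int i = 0; i < (int) AtomToProcMap.size(); i++) {
    if (AtomToProcMap[i] == myNode) {
      globalToLocalAtom[i] = (int) globalAtomIndex.size();
      globalAtomIndex.push_back(i);
    }
  }

  for (int li = 0; li < (int) globalAtomIndex.size(); li++)
    printf("local %d -> global %d\n", li, globalAtomIndex[li]);
  for (int gi = 0; gi < (int) globalToLocalAtom.size(); gi++)
    printf("global %d -> local %d\n", gi, globalToLocalAtom[gi]);
  return 0;
}
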
394   void mpiSimulation::mpiRefresh( void ){
395  
396    int isError, i;
397 <  int *globalIndex = new int[mpiPlug->myNlocal];
397 >  int *globalAtomIndex = new int[parallelData->nAtomsLocal];
398  
399 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
399 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
400 >  // not barf
401  
402 +  for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
403    
404    isError = 0;
405 <  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
405 >  setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
406    if( isError ){
407  
408      sprintf( painCave.errMsg,
# Line 312 | Line 411 | void mpiSimulation::mpiRefresh( void ){
411      simError();
412    }
413  
414 <  delete[] globalIndex;
414 >  delete[] globalAtomIndex;
415  
416 +
417    sprintf( checkPointMsg,
418             " mpiRefresh successful.\n" );
419    MPIcheckPoint();

Diff Legend

Removed lines (revision 419 only; shown with the old line number and no marker)
+ Added lines (revision 1203 only)
< Changed lines (revision 419 version)
> Changed lines (revision 1203 version)