root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp

Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 419 by gezelter, Thu Mar 27 15:07:29 2003 UTC vs.
Revision 1198 by tim, Thu May 27 00:48:12 2004 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2 <
3 < #include <cstdlib>
4 < #include <cstring>
5 < #include <cmath>
2 > #include <iostream>
3 > #include <stdlib.h>
4 > #include <string.h>
5 > #include <math.h>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
14 #define BASE_SEED 123456789
15
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
# Line 20 | Line 17 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17    entryPlug = the_entryPlug;
18    mpiPlug = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
21    mpiPlug->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
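Aside on the hunk above: revision 1198 drops the MPI C++ bindings
(MPI::COMM_WORLD.Get_size() and mpi++.h), which were later deprecated and
then removed from the MPI standard, in favor of the portable C API. A
minimal sketch of the replacement pattern, assuming only a working MPI
installation; the function name is illustrative, not OOPSE code:

    #include <mpi.h>

    // C binding: the size comes back through an out-parameter rather
    // than as a return value, as in the old C++ binding.
    int getProcessorCount() {
      int nProcs;
      MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
      return nProcs;
    }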
# Line 43 | Line 40 | int* mpiSimulation::divideLabor( void ){
40    
41   }
42  
43 < int* mpiSimulation::divideLabor( void ){
43 > void mpiSimulation::divideLabor( ){
44  
48  int* globalIndex;
49
45    int nComponents;
46    MoleculeStamp** compStamps;
47    randomSPRNG *myRandom;
# Line 60 | Line 55 | int* mpiSimulation::divideLabor( void ){
55    int old_atoms, add_atoms, new_atoms;
56  
57    int nTarget;
58 <  int molIndex, atomIndex, compIndex, compStart;
58 >  int molIndex, atomIndex;
59    int done;
65  int nLocal, molLocal;
60    int i, j, loops, which_proc, nmol_local, natoms_local;
61    int nmol_global, natoms_global;
62 <  int local_index, index;
63 <  int smallDiff, bigDiff;
70 <  int baseSeed = BASE_SEED;
62 >  int local_index;
63 >  int baseSeed = entryPlug->getSeed();
64  
72  int testSum;
73
65    nComponents = entryPlug->nComponents;
66    compStamps = entryPlug->compStamps;
67    componentsNmol = entryPlug->componentsNmol;
68 <  AtomsPerProc = new int[mpiPlug->numberProcessors];
68 >  AtomsPerProc = new int[mpiPlug->nProcessors];
69    
70    mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
71    mpiPlug->nBondsGlobal = entryPlug->n_bonds;
# Line 82 | Line 73 | int* mpiSimulation::divideLabor( void ){
73    mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
74    mpiPlug->nSRIGlobal = entryPlug->n_SRI;
75    mpiPlug->nMolGlobal = entryPlug->n_mol;
76 +  mpiPlug->nGroupsGlobal = entryPlug->ngroup;
77  
78    myRandom = new randomSPRNG( baseSeed );
79  
80 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
80 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
81  
82    // Initialize things that we'll send out later:
83 <  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
83 >  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
84      AtomsPerProc[i] = 0;
85    }
86    for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
# Line 103 | Line 95 | int* mpiSimulation::divideLabor( void ){
95      
96    if (mpiPlug->myNode == 0) {
97      numerator = (double) entryPlug->n_atoms;
98 <    denominator = (double) mpiPlug->numberProcessors;
98 >    denominator = (double) mpiPlug->nProcessors;
99      precast = numerator / denominator;
100      nTarget = (int)( precast + 0.5 );
101  
# Line 128 | Line 120 | int* mpiSimulation::divideLabor( void ){
120          
121          // Pick a processor at random
122  
123 <        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
123 >        which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);
124  
125          // How many atoms does this processor have?
126          
127          old_atoms = AtomsPerProc[which_proc];
136
137        // If the processor already had too many atoms, just skip this
138        // processor and try again.
139
140        if (old_atoms >= nTarget) continue;
141
128          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
129          new_atoms = old_atoms + add_atoms;
144    
145        // If we can add this molecule to this processor without sending
146        // it above nTarget, then go ahead and do it:
147    
148        if (new_atoms <= nTarget) {
149          MolToProcMap[i] = which_proc;
150          AtomsPerProc[which_proc] += add_atoms;
151          for (j = 0 ; j < add_atoms; j++ ) {
152            atomIndex++;
153            AtomToProcMap[atomIndex] = which_proc;
154          }
155          done = 1;
156          continue;
157        }
130  
131          // If we've been through this loop too many times, we need
132          // to just give up and assign the molecule to this processor
# Line 172 | Line 144 | int* mpiSimulation::divideLabor( void ){
144            MolToProcMap[i] = which_proc;
145            AtomsPerProc[which_proc] += add_atoms;
146            for (j = 0 ; j < add_atoms; j++ ) {
147 <            atomIndex++;
148 <            AtomToProcMap[atomIndex] = which_proc;
147 >            AtomToProcMap[atomIndex] = which_proc;
148 >            atomIndex++;
149            }
150            done = 1;
151            continue;
152          }
153 +    
154 +        // If we can add this molecule to this processor without sending
155 +        // it above nTarget, then go ahead and do it:
156 +    
157 +        if (new_atoms <= nTarget) {
158 +          MolToProcMap[i] = which_proc;
159 +          AtomsPerProc[which_proc] += add_atoms;
160 +          for (j = 0 ; j < add_atoms; j++ ) {
161 +            AtomToProcMap[atomIndex] = which_proc;
162 +            atomIndex++;
163 +          }
164 +          done = 1;
165 +          continue;
166 +        }
167  
182        // The only situation left is where old_atoms < nTarget, but
183        // new_atoms > nTarget.   We want to accept this with some
184        // probability that dies off the farther we are from nTarget
168  
169 +        // The only situation left is when new_atoms > nTarget.  We
170 +        // want to accept this with some probability that dies off the
171 +        // farther we are from nTarget
172 +
173          // roughly:  x = new_atoms - nTarget
174          //           Pacc(x) = exp(- a * x)
175 <        // where a = 1 / (average atoms per molecule)
175 >        // where a = penalty / (average atoms per molecule)
176  
177          x = (double) (new_atoms - nTarget);
178          y = myRandom->getRandom();
179 <        
180 <        if (exp(- a * x) > y) {
179 >      
180 >        if (y < exp(- a * x)) {
181            MolToProcMap[i] = which_proc;
182            AtomsPerProc[which_proc] += add_atoms;
183            for (j = 0 ; j < add_atoms; j++ ) {
184 <            atomIndex++;
185 <            AtomToProcMap[atomIndex] = which_proc;
186 <          }
184 >            AtomToProcMap[atomIndex] = which_proc;
185 >            atomIndex++;
186 >           }
187            done = 1;
188            continue;
189          } else {
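Aside: the assignment loop above is a Metropolis-style acceptance test.
Adding a molecule that keeps a processor at or below nTarget atoms is
always accepted; overshooting is accepted with probability
Pacc(x) = exp(-a*x), where x = new_atoms - nTarget and, per the changed
line 80 above, a = 3.0 * nMolGlobal / nAtomsGlobal, i.e. a penalty factor
of 3 divided by the average number of atoms per molecule. A self-contained
sketch of the rule, with std::rand() standing in for the SPRNG generator
and an illustrative function name:

    #include <cmath>
    #include <cstdlib>

    // Accept an assignment that overshoots the per-processor atom
    // target with a probability that decays exponentially in the
    // size of the overshoot.
    bool acceptOvershoot(int newAtoms, int nTarget, double a) {
      if (newAtoms <= nTarget) return true;        // under target: accept
      double x = (double)(newAtoms - nTarget);     // overshoot size
      double y = (double)std::rand() / RAND_MAX;   // uniform on [0,1]
      return y < std::exp(-a * x);                 // Pacc(x) = exp(-a x)
    }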
# Line 208 | Line 195 | int* mpiSimulation::divideLabor( void ){
195  
196      // Spray out this nonsense to all other processors:
197  
198 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
199 <                          MPI_INT, 0);
198 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
199 >              MPI_INT, 0, MPI_COMM_WORLD);
200  
201 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
202 <                          MPI_INT, 0);
201 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
202 >              MPI_INT, 0, MPI_COMM_WORLD);
203  
204 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
205 <                          MPI_INT, 0);
204 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
205 >              MPI_INT, 0, MPI_COMM_WORLD);
206  
207 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
208 <                          MPI_INT, 0);    
207 >    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
208 >              MPI_INT, 0, MPI_COMM_WORLD);    
209    } else {
210  
211      // Listen to your marching orders from processor 0:
212      
213 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
214 <                          MPI_INT, 0);
213 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
214 >              MPI_INT, 0, MPI_COMM_WORLD);
215      
216 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
217 <                          MPI_INT, 0);
216 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
217 >              MPI_INT, 0, MPI_COMM_WORLD);
218  
219 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
220 <                          MPI_INT, 0);
219 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
220 >              MPI_INT, 0, MPI_COMM_WORLD);
221      
222 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
223 <                          MPI_INT, 0);
222 >    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
223 >              MPI_INT, 0, MPI_COMM_WORLD);
224 >
225 >
226    }
227  
228  
# Line 253 | Line 242 | int* mpiSimulation::divideLabor( void ){
242      }
243    }
244  
245 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
246 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
245 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
246 >                MPI_COMM_WORLD);
247 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
248 >                MPI_SUM, MPI_COMM_WORLD);
249    
250    if( nmol_global != entryPlug->n_mol ){
251      sprintf( painCave.errMsg,
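Aside: after the MPI_Bcast calls distribute the maps from node 0, the two
MPI_Allreduce calls above implement a distributed sanity check: every node
sums its local molecule and atom counts across all ranks and compares the
totals against the known global values. A sketch of the pattern, with
illustrative names:

    #include <mpi.h>

    // Sum per-rank counts and check them against the expected global
    // total; every rank computes the same verdict.
    bool countsConsistent(int nLocal, int nGlobal) {
      int nSummed;
      MPI_Allreduce(&nLocal, &nSummed, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      return nSummed == nGlobal;
    }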
# Line 278 | Line 269 | int* mpiSimulation::divideLabor( void ){
269             "Successfully divided the molecules among the processors.\n" );
270    MPIcheckPoint();
271  
272 <  mpiPlug->myNMol = nmol_local;
273 <  mpiPlug->myNlocal = natoms_local;
272 >  mpiPlug->nMolLocal = nmol_local;
273 >  mpiPlug->nAtomsLocal = natoms_local;
274  
275 <  globalIndex = new int[mpiPlug->myNlocal];
275 >  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
276 >  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
277    local_index = 0;
278    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
279      if (AtomToProcMap[i] == mpiPlug->myNode) {
280 +      globalAtomIndex[local_index] = i;
281 +
282 +      globalToLocalAtom[i] = local_index;
283        local_index++;
284 <      globalIndex[local_index] = i;
284 >      
285      }
286 +    else
287 +       globalToLocalAtom[i] = -1;
288    }
289 <
290 <  return globalIndex;
289 >
290 >  globalMolIndex.resize(mpiPlug->nMolLocal);
291 >  globalToLocalMol.resize(mpiPlug->nMolGlobal);
292 >  
293 >  local_index = 0;
294 >  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
295 >    if (MolToProcMap[i] == mpiPlug->myNode) {
296 >      globalMolIndex[local_index] = i;
297 >      globalToLocalMol[i] = local_index;
298 >      local_index++;
299 >    }
300 >    else
301 >      globalToLocalMol[i] = -1;
302 >  }
303 >  
304   }
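Aside: revision 1198 replaces the raw globalIndex array that divideLabor
used to return with two pairs of resizable maps (globalAtomIndex /
globalToLocalAtom and globalMolIndex / globalToLocalMol), using -1 as a
sentinel for entries owned by other nodes. A sketch of the two-way mapping
pattern; the function itself is illustrative, not OOPSE code:

    #include <vector>

    // Build local->global and global->local index maps for the items
    // this node owns; -1 marks items living on other nodes.
    void buildIndexMaps(const int* toProcMap, int nGlobal, int myNode,
                        std::vector<int>& localToGlobal,
                        std::vector<int>& globalToLocal) {
      globalToLocal.assign(nGlobal, -1);
      localToGlobal.clear();
      for (int i = 0; i < nGlobal; i++) {
        if (toProcMap[i] == myNode) {
          globalToLocal[i] = (int)localToGlobal.size(); // next local slot
          localToGlobal.push_back(i);                   // remember global id
        }
      }
    }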
305  
306  
307   void mpiSimulation::mpiRefresh( void ){
308  
309    int isError, i;
310 <  int *globalIndex = new int[mpiPlug->myNlocal];
310 >  int *globalIndex = new int[mpiPlug->nAtomsLocal];
311  
312 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
312 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
313 >  // not barf
314  
315 +  for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
316 +
317    
318    isError = 0;
319    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );

Diff Legend

  (unmarked)  Removed lines
  +           Added lines
  <           Changed lines (original revision)
  >           Changed lines (modified revision)