root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp

Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 419 by gezelter, Thu Mar 27 15:07:29 2003 UTC vs.
Revision 1108 by tim, Wed Apr 14 15:37:41 2004 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2 <
3 < #include <cstdlib>
4 < #include <cstring>
5 < #include <cmath>
2 > #include <iostream>
3 > #include <stdlib.h>
4 > #include <string.h>
5 > #include <math.h>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
14 #define BASE_SEED 123456789
15
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
# Line 20 | Line 17 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17    entryPlug = the_entryPlug;
18    mpiPlug = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
21    mpiPlug->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
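
A note on the hunks above: revision 1108 moves from the MPI C++ bindings (the
<mpi++.h> header and MPI::COMM_WORLD.Get_size()) to the plain C API.  The
standalone sketch below is not part of either revision; it only illustrates
the equivalent C-API calls, with made-up variable names.

    // Sketch: MPI C-API calls corresponding to the removed C++ bindings.
    // nProcs/myRank are illustrative names, not OOPSE identifiers.
    #include <mpi.h>
    #include <stdio.h>

    int main(int argc, char* argv[]) {
      int nProcs, myRank;

      MPI_Init(&argc, &argv);

      // C++ bindings (old):  nProcs = MPI::COMM_WORLD.Get_size();
      //                      myRank = MPI::COMM_WORLD.Get_rank();
      // C API (new revision):
      MPI_Comm_size(MPI_COMM_WORLD, &nProcs);
      MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

      printf("rank %d of %d\n", myRank, nProcs);

      MPI_Finalize();
      return 0;
    }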
# Line 43 | Line 40 | int* mpiSimulation::divideLabor( void ){
40    
41   }
42  
43 < int* mpiSimulation::divideLabor( void ){
43 > void mpiSimulation::divideLabor( ){
44  
48  int* globalIndex;
49
45    int nComponents;
46    MoleculeStamp** compStamps;
47    randomSPRNG *myRandom;
# Line 60 | Line 55 | int* mpiSimulation::divideLabor( void ){
55    int old_atoms, add_atoms, new_atoms;
56  
57    int nTarget;
58 <  int molIndex, atomIndex, compIndex, compStart;
58 >  int molIndex, atomIndex;
59    int done;
65  int nLocal, molLocal;
60    int i, j, loops, which_proc, nmol_local, natoms_local;
61    int nmol_global, natoms_global;
62 <  int local_index, index;
63 <  int smallDiff, bigDiff;
70 <  int baseSeed = BASE_SEED;
62 >  int local_index;
63 >  int baseSeed = entryPlug->getSeed();
64  
72  int testSum;
73
65    nComponents = entryPlug->nComponents;
66    compStamps = entryPlug->compStamps;
67    componentsNmol = entryPlug->componentsNmol;
# Line 85 | Line 76 | int* mpiSimulation::divideLabor( void ){
76  
77    myRandom = new randomSPRNG( baseSeed );
78  
79 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
79 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
80  
81    // Initialize things that we'll send out later:
82    for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
# Line 133 | Line 124 | int* mpiSimulation::divideLabor( void ){
124          // How many atoms does this processor have?
125          
126          old_atoms = AtomsPerProc[which_proc];
136
137        // If the processor already had too many atoms, just skip this
138        // processor and try again.
139
140        if (old_atoms >= nTarget) continue;
141
127          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
128          new_atoms = old_atoms + add_atoms;
144    
145        // If we can add this molecule to this processor without sending
146        // it above nTarget, then go ahead and do it:
147    
148        if (new_atoms <= nTarget) {
149          MolToProcMap[i] = which_proc;
150          AtomsPerProc[which_proc] += add_atoms;
151          for (j = 0 ; j < add_atoms; j++ ) {
152            atomIndex++;
153            AtomToProcMap[atomIndex] = which_proc;
154          }
155          done = 1;
156          continue;
157        }
129  
130          // If we've been through this loop too many times, we need
131          // to just give up and assign the molecule to this processor
# Line 172 | Line 143 | int* mpiSimulation::divideLabor( void ){
143            MolToProcMap[i] = which_proc;
144            AtomsPerProc[which_proc] += add_atoms;
145            for (j = 0 ; j < add_atoms; j++ ) {
146 <            atomIndex++;
147 <            AtomToProcMap[atomIndex] = which_proc;
146 >            AtomToProcMap[atomIndex] = which_proc;
147 >            atomIndex++;
148            }
149            done = 1;
150            continue;
151          }
152 +    
153 +        // If we can add this molecule to this processor without sending
154 +        // it above nTarget, then go ahead and do it:
155 +    
156 +        if (new_atoms <= nTarget) {
157 +          MolToProcMap[i] = which_proc;
158 +          AtomsPerProc[which_proc] += add_atoms;
159 +          for (j = 0 ; j < add_atoms; j++ ) {
160 +            AtomToProcMap[atomIndex] = which_proc;
161 +            atomIndex++;
162 +          }
163 +          done = 1;
164 +          continue;
165 +        }
166  
182        // The only situation left is where old_atoms < nTarget, but
183        // new_atoms > nTarget.   We want to accept this with some
184        // probability that dies off the farther we are from nTarget
167  
168 +        // The only situation left is when new_atoms > nTarget.  We
169 +        // want to accept this with some probability that dies off the
170 +        // farther we are from nTarget
171 +
172          // roughly:  x = new_atoms - nTarget
173          //           Pacc(x) = exp(- a * x)
174 <        // where a = 1 / (average atoms per molecule)
174 >        // where a = penalty / (average atoms per molecule)
175  
176          x = (double) (new_atoms - nTarget);
177          y = myRandom->getRandom();
178 <        
179 <        if (exp(- a * x) > y) {
178 >      
179 >        if (y < exp(- a * x)) {
180            MolToProcMap[i] = which_proc;
181            AtomsPerProc[which_proc] += add_atoms;
182            for (j = 0 ; j < add_atoms; j++ ) {
183 <            atomIndex++;
184 <            AtomToProcMap[atomIndex] = which_proc;
185 <          }
183 >            AtomToProcMap[atomIndex] = which_proc;
184 >            atomIndex++;
185 >           }
186            done = 1;
187            continue;
188          } else {
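
The load-balancing hunks above reorder the placement checks, but the Monte
Carlo step itself is unchanged: a molecule that would push a processor past
nTarget is accepted with probability Pacc(x) = exp(-a * x), where x is the
overshoot and a is now weighted by the 3.0 penalty factor set earlier in this
revision.  A small standalone sketch of that acceptance test follows; all of
the numeric values are made up for illustration, and rand() merely stands in
for the randomSPRNG generator used by OOPSE.

    // Illustrative acceptance test: accept an overshooting molecule with
    // probability exp(-a * x).  Only the formula is taken from the source.
    #include <math.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      double nMolGlobal   = 1000.0;  // hypothetical system totals
      double nAtomsGlobal = 3000.0;
      int    nTarget      = 750;     // target atom count for this processor
      int    old_atoms    = 748;     // atoms already assigned to it
      int    add_atoms    = 5;       // atoms in the candidate molecule

      // a = penalty / (average atoms per molecule); the revision uses 3.0
      double a = 3.0 * nMolGlobal / nAtomsGlobal;

      double x    = (double)(old_atoms + add_atoms - nTarget);  // overshoot
      double Pacc = exp(-a * x);
      double y    = (double)rand() / RAND_MAX;  // stand-in for randomSPRNG

      printf("overshoot x = %g, Pacc = %g\n", x, Pacc);
      printf("%s\n", y < Pacc ? "accept here" : "try another processor");
      return 0;
    }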
# Line 208 | Line 194 | int* mpiSimulation::divideLabor( void ){
194  
195      // Spray out this nonsense to all other processors:
196  
197 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
198 <                          MPI_INT, 0);
197 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
198 >              MPI_INT, 0, MPI_COMM_WORLD);
199  
200 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
201 <                          MPI_INT, 0);
200 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
201 >              MPI_INT, 0, MPI_COMM_WORLD);
202  
203 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
204 <                          MPI_INT, 0);
203 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
204 >              MPI_INT, 0, MPI_COMM_WORLD);
205  
206 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
207 <                          MPI_INT, 0);    
206 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
207 >              MPI_INT, 0, MPI_COMM_WORLD);    
208    } else {
209  
210      // Listen to your marching orders from processor 0:
211      
212 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
213 <                          MPI_INT, 0);
212 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
213 >              MPI_INT, 0, MPI_COMM_WORLD);
214      
215 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
216 <                          MPI_INT, 0);
215 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
216 >              MPI_INT, 0, MPI_COMM_WORLD);
217  
218 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
219 <                          MPI_INT, 0);
218 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
219 >              MPI_INT, 0, MPI_COMM_WORLD);
220      
221 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
222 <                          MPI_INT, 0);
221 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
222 >              MPI_INT, 0, MPI_COMM_WORLD);
223 >
224 >
225    }
226  
227  
# Line 253 | Line 241 | int* mpiSimulation::divideLabor( void ){
241      }
242    }
243  
244 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
245 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
244 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
245 >                MPI_COMM_WORLD);
246 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
247 >                MPI_SUM, MPI_COMM_WORLD);
248    
249    if( nmol_global != entryPlug->n_mol ){
250      sprintf( painCave.errMsg,
# Line 281 | Line 271 | int* mpiSimulation::divideLabor( void ){
271    mpiPlug->myNMol = nmol_local;
272    mpiPlug->myNlocal = natoms_local;
273  
274 <  globalIndex = new int[mpiPlug->myNlocal];
274 >  globalAtomIndex.resize(mpiPlug->myNlocal);
275    local_index = 0;
276    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
277      if (AtomToProcMap[i] == mpiPlug->myNode) {
278 +      globalAtomIndex[local_index] = i;
279 +
280 +      globalToLocalAtom[i] = local_index;
281        local_index++;
282 <      globalIndex[local_index] = i;
282 >      
283      }
284 +    else
285 +       globalToLocalAtom[i] = -1;
286    }
287 <
288 <  return globalIndex;
287 >
288 >  globalMolIndex.resize(mpiPlug->myNMol);
289 >  local_index = 0;
290 >  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
291 >    if (MolToProcMap[i] == mpiPlug->myNode) {
292 >      globalMolIndex[local_index] = i;
293 >      globalToLocalMol[i] = local_index;
294 >      local_index++;
295 >    }
296 >    else
297 >      globalToLocalMol[i] = -1;
298 >  }
299 >  
300   }
301  
302  
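
Instead of returning a bare globalIndex array, the new divideLabor() fills
member containers: a compact local-to-global list (globalAtomIndex,
globalMolIndex) plus a global-to-local lookup (globalToLocalAtom,
globalToLocalMol) that is -1 for entries owned by another processor.  The
sketch below shows that two-way bookkeeping in isolation; the container types
are assumptions, since the real declarations live in mpiSimulation.hpp, which
is not part of this diff.

    // Illustrative two-way index bookkeeping for a toy atom distribution.
    #include <stdio.h>
    #include <vector>

    int main() {
      const int nAtomsGlobal = 6;
      const int myNode = 1;
      int atomToProcMap[nAtomsGlobal] = {0, 1, 1, 0, 1, 0};  // toy assignment

      std::vector<int> globalAtomIndex;                      // local -> global
      std::vector<int> globalToLocalAtom(nAtomsGlobal, -1);  // global -> local, -1 if remote

      for (int i = 0; i < nAtomsGlobal; i++) {
        if (atomToProcMap[i] == myNode) {
          globalToLocalAtom[i] = (int)globalAtomIndex.size();
          globalAtomIndex.push_back(i);
        }
      }

      for (int i = 0; i < nAtomsGlobal; i++)
        printf("global atom %d -> local %d\n", i, globalToLocalAtom[i]);
      return 0;
    }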
# Line 299 | Line 305 | void mpiSimulation::mpiRefresh( void ){
305    int isError, i;
306    int *globalIndex = new int[mpiPlug->myNlocal];
307  
308 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
308 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
309 >  // not barf
310  
311 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
312 +
313    
314    isError = 0;
315    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
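
The final hunk shifts each global index up by one before handing the array to
the Fortran side, since Fortran arrays are 1-based while the C++ indices are
0-based.  A trivial standalone illustration (the variable names are
hypothetical):

    // C/C++ use 0-based indices; Fortran expects 1-based ones.
    #include <stdio.h>

    int main() {
      int cIndex       = 41;          // hypothetical 0-based C++ global index
      int fortranIndex = cIndex + 1;  // value passed across the boundary
      printf("C++ index %d -> Fortran index %d\n", cIndex, fortranIndex);
      return 0;
    }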

Diff Legend

  Removed lines (old line number only, no marker)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)