Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 434 by chuckv, Fri Mar 28 19:30:59 2003 UTC vs.
Revision 1108 by tim, Wed Apr 14 15:37:41 2004 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2   #include <iostream>
3 < #include <cstdlib>
4 < #include <cstring>
5 < #include <cmath>
3 > #include <stdlib.h>
4 > #include <string.h>
5 > #include <math.h>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
14 #define BASE_SEED 123456789
15
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
# Line 20 | Line 17 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17    entryPlug = the_entryPlug;
18    mpiPlug = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
21    mpiPlug->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
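The conversion from the C++ MPI bindings (mpi++.h, MPI::COMM_WORLD) to the plain C API runs through the whole changeset; the C interface is more widely supported, and the C++ bindings were later deprecated in MPI-2.2 and removed in MPI-3.0. A minimal, self-contained sketch of the equivalence, outside the OOPSE codebase:

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int nprocs, rank;
      // C API: results come back through out-parameters.
      MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // The retired C++ bindings spelled the same queries as:
      //   nprocs = MPI::COMM_WORLD.Get_size();
      //   rank   = MPI::COMM_WORLD.Get_rank();

      MPI_Finalize();
      return 0;
    }
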
# Line 43 | Line 40 | int* mpiSimulation::divideLabor( void ){
40    
41   }
42  
43 < int* mpiSimulation::divideLabor( void ){
43 > void mpiSimulation::divideLabor( ){
44  
48  int* globalIndex;
49
45    int nComponents;
46    MoleculeStamp** compStamps;
47    randomSPRNG *myRandom;
# Line 60 | Line 55 | int* mpiSimulation::divideLabor( void ){
55    int old_atoms, add_atoms, new_atoms;
56  
57    int nTarget;
58 <  int molIndex, atomIndex, compIndex, compStart;
58 >  int molIndex, atomIndex;
59    int done;
65  int nLocal, molLocal;
60    int i, j, loops, which_proc, nmol_local, natoms_local;
61    int nmol_global, natoms_global;
62 <  int local_index, index;
63 <  int smallDiff, bigDiff;
70 <  int baseSeed = BASE_SEED;
62 >  int local_index;
63 >  int baseSeed = entryPlug->getSeed();
64  
72  int testSum;
73
65    nComponents = entryPlug->nComponents;
66    compStamps = entryPlug->compStamps;
67    componentsNmol = entryPlug->componentsNmol;
# Line 203 | Line 194 | int* mpiSimulation::divideLabor( void ){
194  
195      // Spray out this nonsense to all other processors:
196  
197 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
198 <                          MPI_INT, 0);
197 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
198 >              MPI_INT, 0, MPI_COMM_WORLD);
199  
200 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
201 <                          MPI_INT, 0);
200 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
201 >              MPI_INT, 0, MPI_COMM_WORLD);
202  
203 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
204 <                          MPI_INT, 0);
203 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
204 >              MPI_INT, 0, MPI_COMM_WORLD);
205  
206 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
207 <                          MPI_INT, 0);    
206 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
207 >              MPI_INT, 0, MPI_COMM_WORLD);    
208    } else {
209  
210      // Listen to your marching orders from processor 0:
211      
212 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
213 <                          MPI_INT, 0);
214 <    
215 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
216 <                          MPI_INT, 0);
212 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
213 >              MPI_INT, 0, MPI_COMM_WORLD);
214 >    
215 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
216 >              MPI_INT, 0, MPI_COMM_WORLD);
217  
218 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
219 <                          MPI_INT, 0);
218 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
219 >              MPI_INT, 0, MPI_COMM_WORLD);
220      
221 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
222 <                          MPI_INT, 0);
221 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
222 >              MPI_INT, 0, MPI_COMM_WORLD);
223  
224  
225    }
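One wrinkle worth noting: MPI_Bcast is a collective call, so the root and the listeners issue the identical statement, and the root argument (here 0) decides whose buffer is read rather than written. That is why the two branches above differ only in their comments. A minimal sketch:

    #include <mpi.h>

    // Broadcast an int map from rank 0 (minimal sketch).  On rank 0 the
    // buffer is the source; on every other rank it is overwritten.
    void broadcastMap(int* map, int count) {
      MPI_Bcast(map, count, MPI_INT, 0, MPI_COMM_WORLD);
    }
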
# Line 250 | Line 241 | int* mpiSimulation::divideLabor( void ){
241      }
242    }
243  
244 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
245 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
244 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
245 >                MPI_COMM_WORLD);
246 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
247 >                MPI_SUM, MPI_COMM_WORLD);
248    
249    if( nmol_global != entryPlug->n_mol ){
250      sprintf( painCave.errMsg,
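The reductions above are a sanity check: each processor sums what it owns, MPI_Allreduce with MPI_SUM delivers the global totals to every rank, and a mismatch against the input's molecule or atom counts aborts the run through painCave. The C call folds the old MPI::COMM_WORLD.Allreduce arguments plus the communicator into one signature:

    int nmol_local  = 0;   // molecules owned by this rank
    int nmol_global = 0;   // filled in with the sum over all ranks

    MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                  MPI_COMM_WORLD);
    // nmol_global must now equal the molecule count from the input file.
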
# Line 278 | Line 271 | int* mpiSimulation::divideLabor( void ){
271    mpiPlug->myNMol = nmol_local;
272    mpiPlug->myNlocal = natoms_local;
273  
274 <  globalIndex = new int[mpiPlug->myNlocal];
274 >  globalAtomIndex.resize(mpiPlug->myNlocal);
275    local_index = 0;
276    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
277      if (AtomToProcMap[i] == mpiPlug->myNode) {
278 <      globalIndex[local_index] = i;
278 >      globalAtomIndex[local_index] = i;
279 >
280 >      globalToLocalAtom[i] = local_index;
281        local_index++;
282 +      
283      }
284 +    else
285 +       globalToLocalAtom[i] = -1;
286    }
287 +
288 +  globalMolIndex.resize(mpiPlug->myNMol);
289 +  local_index = 0;
290 +  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
291 +    if (MolToProcMap[i] == mpiPlug->myNode) {
292 +      globalMolIndex[local_index] = i;
293 +      globalToLocalMol[i] = local_index;
294 +      local_index++;
295 +    }
296 +    else
297 +      globalToLocalMol[i] = -1;
298 +  }
299    
290  return globalIndex;
300   }
301  
302  
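The old interface returned a bare new int[] that the caller had to own; with void divideLabor(), the results now live in members of mpiSimulation. From the resize() calls and the -1 sentinels one can read off the shape of those members, though their actual declarations sit in mpiSimulation.hpp rather than in this diff. A plausible sketch, assuming vector-backed forward maps and map-backed reverse lookups:

    #include <map>
    #include <vector>

    // Assumed shapes only -- see mpiSimulation.hpp for the real types.
    std::vector<int> globalAtomIndex;     // local atom index  -> global index
    std::vector<int> globalMolIndex;      // local mol index   -> global index
    std::map<int, int> globalToLocalAtom; // global atom -> local, -1 if remote
    std::map<int, int> globalToLocalMol;  // global mol  -> local, -1 if remote
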
# Line 296 | Line 305 | void mpiSimulation::mpiRefresh( void ){
305    int isError, i;
306    int *globalIndex = new int[mpiPlug->myNlocal];
307  
308 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
308 >  // Fortran arrays are 1-based, so shift the 0-based C++ indices by 1
309 >  // before they cross the language boundary.
310  
311 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
312 +
313    
314    isError = 0;
315    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );

Diff Legend

  (no marker)  Removed lines
  +            Added lines
  <            Changed lines (old revision)
  >            Changed lines (new revision)