root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp

Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 432 by chuckv, Thu Mar 27 23:33:40 2003 UTC vs.
Revision 726 by tim, Tue Aug 26 20:37:30 2003 UTC

# Line 4 | Line 4
4   #include <cstring>
5   #include <cmath>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
14 #define BASE_SEED 123456789
15
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
# Line 20 | Line 17 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17    entryPlug = the_entryPlug;
18    mpiPlug = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
21    mpiPlug->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
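
The constructor hunk above is part of a broader change in this revision: the C++ MPI bindings (MPI::COMM_WORLD, supplied by the removed <mpi++.h> header) are replaced with the plain C MPI API. A minimal sketch of the pattern, outside the context of this file:

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int numberProcessors, myNode;

      // C++ bindings (old):  numberProcessors = MPI::COMM_WORLD.Get_size();
      // C bindings (new): the value is written through an output argument
      // and the call returns an error code instead.
      MPI_Comm_size(MPI_COMM_WORLD, &numberProcessors);
      MPI_Comm_rank(MPI_COMM_WORLD, &myNode);

      MPI_Finalize();
      return 0;
    }
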
# Line 67 | Line 64 | int* mpiSimulation::divideLabor( void ){
64    int nmol_global, natoms_global;
65    int local_index, index;
66    int smallDiff, bigDiff;
67 <  int baseSeed = BASE_SEED;
67 >  int baseSeed = entryPlug->getSeed();
68  
69    int testSum;
70  
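
The seed hunk above drops the compile-time BASE_SEED constant in favor of a value obtained at run time from entryPlug->getSeed(). A minimal sketch of the idea; std::mt19937 and the helper name are stand-ins for the project's randomSPRNG wrapper, not the code in this file:

    #include <random>

    // runtimeSeed stands in for entryPlug->getSeed(); std::mt19937 stands in
    // for the project's randomSPRNG wrapper (both are assumptions for the
    // sketch, not the code in this file).
    double firstDraw(unsigned int runtimeSeed) {
      std::mt19937 engine(runtimeSeed);   // seeded per run, not at compile time
      std::uniform_real_distribution<double> uniform01(0.0, 1.0);
      return uniform01(engine);           // analogous to myRandom->getRandom()
    }
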
# Line 83 | Line 80 | int* mpiSimulation::divideLabor( void ){
80    mpiPlug->nSRIGlobal = entryPlug->n_SRI;
81    mpiPlug->nMolGlobal = entryPlug->n_mol;
82  
83 +  
84    myRandom = new randomSPRNG( baseSeed );
85  
86 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
86 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
87  
88    // Initialize things that we'll send out later:
89    for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
# Line 136 | Line 134 | int* mpiSimulation::divideLabor( void ){
134          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
135          new_atoms = old_atoms + add_atoms;
136  
139        // If the processor already had too many atoms, just skip this
140        // processor and try again.
141
137          // If we've been through this loop too many times, we need
138          // to just give up and assign the molecule to this processor
139          // and be done with it.
# Line 161 | Line 156 | int* mpiSimulation::divideLabor( void ){
156            done = 1;
157            continue;
158          }
164
165        if (old_atoms >= nTarget) continue;
159      
160          // If we can add this molecule to this processor without sending
161          // it above nTarget, then go ahead and do it:
# Line 179 | Line 172 | int* mpiSimulation::divideLabor( void ){
172          }
173  
174  
175 <        // The only situation left is where old_atoms < nTarget, but
176 <        // new_atoms > nTarget.   We want to accept this with some
177 <        // probability that dies off the farther we are from nTarget
175 >        // The only situation left is when new_atoms > nTarget.  We
176 >        // want to accept this with some probability that dies off the
177 >        // farther we are from nTarget
178  
179          // roughly:  x = new_atoms - nTarget
180          //           Pacc(x) = exp(- a * x)
181 <        // where a = 1 / (average atoms per molecule)
181 >        // where a = penalty / (average atoms per molecule)
182  
183          x = (double) (new_atoms - nTarget);
184          y = myRandom->getRandom();
185 <        
186 <        if (exp(- a * x) > y) {
185 >      
186 >        if (y < exp(- a * x)) {
187            MolToProcMap[i] = which_proc;
188            AtomsPerProc[which_proc] += add_atoms;
189            for (j = 0 ; j < add_atoms; j++ ) {
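
The comments in this hunk describe the molecule-placement heuristic: a molecule that keeps the candidate processor at or below nTarget atoms is accepted outright, while one that pushes it past nTarget is accepted with probability Pacc(x) = exp(-a*x), where x = new_atoms - nTarget and a = 3.0 * nMolGlobal / nAtomsGlobal, i.e. penalty / (average atoms per molecule) with a penalty of 3.0. The revision also rewrites the test as y < exp(-a*x), accepting when the uniform draw falls below the acceptance probability. A hedged sketch of the decision (the function name and the uniform01 argument are illustrative, not from the source):

    #include <cmath>

    // a is the decay constant computed earlier in divideLabor():
    // a = 3.0 * nMolGlobal / nAtomsGlobal = penalty / (average atoms per molecule).
    bool acceptMolecule(int old_atoms, int add_atoms, int nTarget,
                        double a, double uniform01) {
      int new_atoms = old_atoms + add_atoms;
      if (new_atoms <= nTarget) return true;       // fits at or under the target: accept
      double x = (double)(new_atoms - nTarget);    // overshoot past the target
      return uniform01 < std::exp(-a * x);         // accept with Pacc(x) = exp(-a x)
    }
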
# Line 208 | Line 201 | int* mpiSimulation::divideLabor( void ){
201  
202      // Spray out this nonsense to all other processors:
203  
204 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
205 <                          MPI_INT, 0);
204 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
205 >              MPI_INT, 0, MPI_COMM_WORLD);
206  
207 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
208 <                          MPI_INT, 0);
207 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
208 >              MPI_INT, 0, MPI_COMM_WORLD);
209  
210 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
211 <                          MPI_INT, 0);
210 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
211 >              MPI_INT, 0, MPI_COMM_WORLD);
212  
213 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
214 <                          MPI_INT, 0);    
213 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
214 >              MPI_INT, 0, MPI_COMM_WORLD);    
215    } else {
216  
217      // Listen to your marching orders from processor 0:
218      
219 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
220 <                          MPI_INT, 0);
219 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
220 >              MPI_INT, 0, MPI_COMM_WORLD);
221      
222 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
223 <                          MPI_INT, 0);
222 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
223 >              MPI_INT, 0, MPI_COMM_WORLD);
224  
225 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
226 <                          MPI_INT, 0);
225 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
226 >              MPI_INT, 0, MPI_COMM_WORLD);
227      
228 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
229 <                          MPI_INT, 0);
228 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
229 >              MPI_INT, 0, MPI_COMM_WORLD);
230  
231  
232    }
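
In the hunk above, processor 0 broadcasts the assignment maps it just built, and the move from MPI::COMM_WORLD.Bcast to MPI_Bcast adds the explicit root and communicator arguments. Because MPI_Bcast is collective and every rank passes root = 0, the root and non-root branches of the if/else issue the same calls; only the comments differ. A minimal sketch of the pattern (broadcastMaps is an illustrative helper, not part of the source):

    #include <mpi.h>

    // Every rank calls MPI_Bcast with root = 0: on rank 0 the buffers are the
    // source, on every other rank they are overwritten with rank 0's data.
    void broadcastMaps(int* MolToProcMap, int nMolGlobal,
                       int* AtomToProcMap, int nAtomsGlobal) {
      MPI_Bcast(MolToProcMap,  nMolGlobal,   MPI_INT, 0, MPI_COMM_WORLD);
      MPI_Bcast(AtomToProcMap, nAtomsGlobal, MPI_INT, 0, MPI_COMM_WORLD);
    }
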
# Line 255 | Line 248 | int* mpiSimulation::divideLabor( void ){
248      }
249    }
250  
251 <  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
252 <
253 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
254 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
251 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
252 >                MPI_COMM_WORLD);
253 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
254 >                MPI_SUM, MPI_COMM_WORLD);
255    
256    if( nmol_global != entryPlug->n_mol ){
257      sprintf( painCave.errMsg,
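
The hunk above also drops a leftover std::cerr debug line and converts the reductions to the C API: MPI_Allreduce with MPI_SUM leaves the summed molecule and atom counts on every rank, where they are checked against the totals from the input. A minimal sketch (countsConsistent is an illustrative helper, not part of the source):

    #include <mpi.h>

    // The summed totals are available on every rank after MPI_Allreduce and
    // can be compared against the counts expected from the input.
    bool countsConsistent(int nmol_local, int natoms_local,
                          int nMolExpected, int nAtomsExpected) {
      int nmol_global = 0, natoms_global = 0;
      MPI_Allreduce(&nmol_local,   &nmol_global,   1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
      return nmol_global == nMolExpected && natoms_global == nAtomsExpected;
    }
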
# Line 303 | Line 296 | void mpiSimulation::mpiRefresh( void ){
296    int isError, i;
297    int *globalIndex = new int[mpiPlug->myNlocal];
298  
299 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
299 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
300 >  // not barf
301  
302 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
303 +
304    
305    isError = 0;
306    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
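
The final hunk shifts every global index by +1 before it is handed to setFsimParallel, because the Fortran side uses 1-based array indexing while the C++ atoms are indexed from 0. A minimal sketch of the conversion (toFortranIndices is an illustrative helper, not part of the source):

    #include <cstddef>
    #include <vector>

    // Shift 0-based C/C++ global indices up by one so they match Fortran's
    // default 1-based array indexing before crossing the language boundary.
    std::vector<int> toFortranIndices(const std::vector<int>& cIndices) {
      std::vector<int> fIndices(cIndices.size());
      for (std::size_t i = 0; i < cIndices.size(); ++i)
        fIndices[i] = cIndices[i] + 1;   // 0-based -> 1-based
      return fIndices;
    }
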

Diff Legend

Removed lines (no marker; present only in revision 432)
+ Added lines (present only in revision 726)
< Changed lines (as in revision 432)
> Changed lines (as in revision 726)