
Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 432 by chuckv, Thu Mar 27 23:33:40 2003 UTC vs.
Revision 447 by mmeineke, Thu Apr 3 20:21:54 2003 UTC

# Line 4 | Line 4
4   #include <cstring>
5   #include <cmath>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
# Line 20 | Line 19 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
19    entryPlug = the_entryPlug;
20    mpiPlug = new mpiSimData;
21    
22 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
22 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
23    mpiPlug->myNode = worldRank;
24  
25    MolToProcMap = new int[entryPlug->n_mol];
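Together with the removal of mpi++.h in the first hunk, the change above moves from the MPI-2 C++ bindings (MPI::COMM_WORLD.Get_size()) to the plain C API. A minimal, self-contained sketch of the C-binding calls involved; the program below is illustrative and not part of the OOPSE sources:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int numberProcessors, myNode;
      MPI_Comm_size(MPI_COMM_WORLD, &numberProcessors);  // replaces MPI::COMM_WORLD.Get_size()
      MPI_Comm_rank(MPI_COMM_WORLD, &myNode);            // C-binding equivalent of Get_rank()

      std::printf("node %d of %d\n", myNode, numberProcessors);
      MPI_Finalize();
      return 0;
    }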
# Line 85 | Line 84 | int* mpiSimulation::divideLabor( void ){
84  
85    myRandom = new randomSPRNG( baseSeed );
86  
87 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
87 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
88  
89    // Initialize things that we'll send out later:
90    for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
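With illustrative numbers: a system of 100 molecules and 300 atoms averages 3 atoms per molecule, so the old expression gave a = 100/300 ≈ 0.33 while the revised one gives a = 3.0 * 100/300 = 1.0. Per the acceptance rule later in this file (Pacc(x) = exp(- a * x)), overshooting the per-processor atom target by x atoms is then accepted with probability exp(-x) rather than exp(-x/3); the stiffer penalty factor of 3.0 makes the load balancer less willing to overload a processor.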
# Line 136 | Line 135 | int* mpiSimulation::divideLabor( void ){
135          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
136          new_atoms = old_atoms + add_atoms;
137  
139        // If the processor already had too many atoms, just skip this
140        // processor and try again.
141
138          // If we've been through this loop too many times, we need
139          // to just give up and assign the molecule to this processor
140          // and be done with it.
# Line 161 | Line 157 | int* mpiSimulation::divideLabor( void ){
157            done = 1;
158            continue;
159          }
164
165        if (old_atoms >= nTarget) continue;
160      
161          // If we can add this molecule to this processor without sending
162          // it above nTarget, then go ahead and do it:
# Line 179 | Line 173 | int* mpiSimulation::divideLabor( void ){
173          }
174  
175  
176 <        // The only situation left is where old_atoms < nTarget, but
177 <        // new_atoms > nTarget.   We want to accept this with some
178 <        // probability that dies off the farther we are from nTarget
176 >        // The only situation left is when new_atoms > nTarget.  We
177 >        // want to accept this with some probability that dies off the
178 >        // farther we are from nTarget
179  
180          // roughly:  x = new_atoms - nTarget
181          //           Pacc(x) = exp(- a * x)
182 <        // where a = 1 / (average atoms per molecule)
182 >        // where a = penalty / (average atoms per molecule)
183  
184          x = (double) (new_atoms - nTarget);
185          y = myRandom->getRandom();
186 <        
187 <        if (exp(- a * x) > y) {
186 >      
187 >        if (y < exp(- a * x)) {
188            MolToProcMap[i] = which_proc;
189            AtomsPerProc[which_proc] += add_atoms;
190            for (j = 0 ; j < add_atoms; j++ ) {
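The comment and test above implement a Metropolis-style acceptance step: a molecule that would push a processor x atoms past nTarget is still assigned there with probability exp(- a * x). A minimal sketch of that test in isolation (the helper name and the use of std::rand as the uniform deviate are illustrative, standing in for myRandom->getRandom()):

    #include <cmath>
    #include <cstdlib>

    // Decide whether a molecule of add_atoms atoms may go on a processor
    // that already holds old_atoms atoms, given the per-processor target.
    bool acceptPlacement(int old_atoms, int add_atoms, int nTarget, double a) {
      int new_atoms = old_atoms + add_atoms;
      if (new_atoms <= nTarget) return true;        // under target: always accept
      double x = (double)(new_atoms - nTarget);     // size of the overshoot
      double y = (double)std::rand() / RAND_MAX;    // uniform stand-in for myRandom->getRandom()
      return y < std::exp(-a * x);                  // Pacc(x) = exp(- a * x)
    }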
# Line 208 | Line 202 | int* mpiSimulation::divideLabor( void ){
202  
203      // Spray out this nonsense to all other processors:
204  
205 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
206 <                          MPI_INT, 0);
205 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
206 >              MPI_INT, 0, MPI_COMM_WORLD);
207  
208 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
209 <                          MPI_INT, 0);
208 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
209 >              MPI_INT, 0, MPI_COMM_WORLD);
210  
211 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
212 <                          MPI_INT, 0);
211 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
212 >              MPI_INT, 0, MPI_COMM_WORLD);
213  
214 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
215 <                          MPI_INT, 0);    
214 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
215 >              MPI_INT, 0, MPI_COMM_WORLD);    
216    } else {
217  
218      // Listen to your marching orders from processor 0:
219      
220 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
221 <                          MPI_INT, 0);
220 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
221 >              MPI_INT, 0, MPI_COMM_WORLD);
222      
223 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
224 <                          MPI_INT, 0);
223 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
224 >              MPI_INT, 0, MPI_COMM_WORLD);
225  
226 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
227 <                          MPI_INT, 0);
226 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
227 >              MPI_INT, 0, MPI_COMM_WORLD);
228      
229 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
230 <                          MPI_INT, 0);
229 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
230 >              MPI_INT, 0, MPI_COMM_WORLD);
231  
232  
233    }
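Both branches above make the same four MPI_Bcast calls with root 0: on node 0 the buffers already hold the freshly computed maps and are sent, while on every other node the identical call receives into the same buffers. A minimal sketch of that collective pattern (the function and argument names are illustrative):

    #include <mpi.h>

    // Every rank must call this with the same arguments; rank 0 supplies the
    // data, and every other rank's map buffer is overwritten with rank 0's copy.
    void broadcastMap(int* map, int count) {
      MPI_Bcast(map, count, MPI_INT, 0, MPI_COMM_WORLD);
    }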
# Line 255 | Line 249 | int* mpiSimulation::divideLabor( void ){
249      }
250    }
251  
252 <  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
253 <
254 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
255 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
252 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
253 >                MPI_COMM_WORLD);
254 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
255 >                MPI_SUM, MPI_COMM_WORLD);
256    
257    if( nmol_global != entryPlug->n_mol ){
258      sprintf( painCave.errMsg,
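The Allreduce pair above replaces the C++-binding calls (and drops a leftover std::cerr debug line); each node contributes its local molecule and atom counts and every node receives the summed totals, which are then checked against entryPlug->n_mol and entryPlug->n_atoms. A minimal sketch of that consistency check (the function name and return convention are illustrative):

    #include <mpi.h>

    // Sum a local count over all ranks; every rank receives the global total
    // and can compare it against the expected value.
    bool globalCountMatches(int local_count, int expected_total) {
      int global_count = 0;
      MPI_Allreduce(&local_count, &global_count, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      return global_count == expected_total;
    }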
# Line 303 | Line 297 | void mpiSimulation::mpiRefresh( void ){
297    int isError, i;
298    int *globalIndex = new int[mpiPlug->myNlocal];
299  
300 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
300 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
301 >  // not barf
302  
303 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
304 +
305    
306    isError = 0;
307    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
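The new comment and the "+1" above account for the fact that the Fortran side reached through setFsimParallel uses 1-based array indices while the C++ atom list is 0-based: for example, the atom with C++ global index 0 must be passed as index 1 for the Fortran modules to address it correctly.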

Diff Legend

Removed lines (present in revision 432 only)
+ Added lines (present in revision 447 only)
< Changed lines (revision 432 side)
> Changed lines (revision 447 side)