
Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 418 by mmeineke, Thu Mar 27 14:30:24 2003 UTC vs.
Revision 708 by tim, Wed Aug 20 22:23:34 2003 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2 <
2 > #include <iostream>
3   #include <cstdlib>
4   #include <cstring>
5   #include <cmath>
6   #include <mpi.h>
7 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
# Line 20 | Line 19 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
19    entryPlug = the_entryPlug;
20    mpiPlug = new mpiSimData;
21    
22 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
22 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
23    mpiPlug->myNode = worldRank;
24  
25    MolToProcMap = new int[entryPlug->n_mol];
26    MolComponentType = new int[entryPlug->n_mol];
28
27    AtomToProcMap = new int[entryPlug->n_atoms];
28  
29    mpiSim = this;
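
A quick aside on the constructor hunk above: revision 708 drops the MPI-2 C++ bindings (the <mpi++.h> header and MPI::COMM_WORLD.Get_size()) in favor of the plain C bindings, which are more widely supported across MPI implementations. The stand-alone sketch below is not OOPSE code; the variable names are illustrative only. It shows the equivalent C-binding calls.

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    int nprocs, myRank;
    MPI_Init(&argc, &argv);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);  // replaces MPI::COMM_WORLD.Get_size()
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);  // replaces MPI::COMM_WORLD.Get_rank()
    std::printf("rank %d of %d\n", myRank, nprocs);
    MPI_Finalize();
    return 0;
  }
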
# Line 68 | Line 66 | int* mpiSimulation::divideLabor( void ){
66    int nmol_global, natoms_global;
67    int local_index, index;
68    int smallDiff, bigDiff;
69 <  int baseSeed = BASE_SEED;
69 >  int baseSeed = entryPlug->getSeed();
70  
71    int testSum;
72  
# Line 84 | Line 82 | int* mpiSimulation::divideLabor( void ){
82    mpiPlug->nSRIGlobal = entryPlug->n_SRI;
83    mpiPlug->nMolGlobal = entryPlug->n_mol;
84  
85 +  
86    myRandom = new randomSPRNG( baseSeed );
87  
88 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
88 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
89  
90    // Initialize things that we'll send out later:
91    for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
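
The coefficient a set in this hunk feeds the acceptance test used later in divideLabor: nMolGlobal/nAtomsGlobal is the reciprocal of the average number of atoms per molecule, and revision 708 multiplies it by a penalty factor of 3.0, matching the updated comment "a = penalty / (average atoms per molecule)". The small numerical sketch below uses made-up counts (not values from the diff) to show how quickly the acceptance probability decays.

  #include <cmath>
  #include <cstdio>

  int main() {
    // Hypothetical system: 100 molecules, 300 atoms (3 atoms per molecule).
    double nMol = 100.0, nAtoms = 300.0;
    double a = 3.0 * nMol / nAtoms;      // penalty / (average atoms per molecule) = 1.0
    double x = 3.0;                      // overshoot nTarget by one whole molecule
    std::printf("Pacc = %.3f\n", std::exp(-a * x));   // ~0.050
    return 0;
  }
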
# Line 134 | Line 133 | int* mpiSimulation::divideLabor( void ){
133          // How many atoms does this processor have?
134          
135          old_atoms = AtomsPerProc[which_proc];
137
138        // If the processor already had too many atoms, just skip this
139        // processor and try again.
140
141        if (old_atoms >= nTarget) continue;
142
136          add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
137          new_atoms = old_atoms + add_atoms;
145    
146        // If we can add this molecule to this processor without sending
147        // it above nTarget, then go ahead and do it:
148    
149        if (new_atoms <= nTarget) {
150          MolToProcMap[i] = which_proc;
151          AtomsPerProc[which_proc] += add_atoms;
152          for (j = 0 ; j < add_atoms; j++ ) {
153            atomIndex++;
154            AtomToProcMap[atomIndex] = which_proc;
155          }
156          done = 1;
157          continue;
158        }
138  
139          // If we've been through this loop too many times, we need
140          // to just give up and assign the molecule to this processor
# Line 173 | Line 152 | int* mpiSimulation::divideLabor( void ){
152            MolToProcMap[i] = which_proc;
153            AtomsPerProc[which_proc] += add_atoms;
154            for (j = 0 ; j < add_atoms; j++ ) {
155 <            atomIndex++;
156 <            AtomToProcMap[atomIndex] = which_proc;
155 >            AtomToProcMap[atomIndex] = which_proc;
156 >            atomIndex++;
157            }
158            done = 1;
159            continue;
160          }
161 +    
162 +        // If we can add this molecule to this processor without sending
163 +        // it above nTarget, then go ahead and do it:
164 +    
165 +        if (new_atoms <= nTarget) {
166 +          MolToProcMap[i] = which_proc;
167 +          AtomsPerProc[which_proc] += add_atoms;
168 +          for (j = 0 ; j < add_atoms; j++ ) {
169 +            AtomToProcMap[atomIndex] = which_proc;
170 +            atomIndex++;
171 +          }
172 +          done = 1;
173 +          continue;
174 +        }
175  
183        // The only situation left is where old_atoms < nTarget, but
184        // new_atoms > nTarget.   We want to accept this with some
185        // probability that dies off the farther we are from nTarget
176  
177 +        // The only situation left is when new_atoms > nTarget.  We
178 +        // want to accept this with some probability that dies off the
179 +        // farther we are from nTarget
180 +
181          // roughly:  x = new_atoms - nTarget
182          //           Pacc(x) = exp(- a * x)
183 <        // where a = 1 / (average atoms per molecule)
183 >        // where a = penalty / (average atoms per molecule)
184  
185          x = (double) (new_atoms - nTarget);
186          y = myRandom->getRandom();
187 <        
188 <        if (exp(- a * x) > y) {
187 >      
188 >        if (y < exp(- a * x)) {
189            MolToProcMap[i] = which_proc;
190            AtomsPerProc[which_proc] += add_atoms;
191            for (j = 0 ; j < add_atoms; j++ ) {
192 <            atomIndex++;
193 <            AtomToProcMap[atomIndex] = which_proc;
194 <          }
192 >            AtomToProcMap[atomIndex] = which_proc;
193 >            atomIndex++;
194 >           }
195            done = 1;
196            continue;
197          } else {
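
Three things change in the molecule-assignment loop above: the early "skip an already-full processor" test is dropped, the "molecule fits below nTarget" branch moves after the give-up branch, and in every branch the map slot is now written before atomIndex is incremented (an ordering consistent with atomIndex starting at 0). The acceptance test is also rewritten as y < exp(-a * x), which is equivalent to the old form but reads as "accept with probability Pacc". The toy program below only borrows the variable names from the diff; the numbers are invented.

  #include <cmath>
  #include <cstdio>
  #include <cstdlib>
  #include <vector>

  // Toy sketch (not OOPSE code): accept a molecule outright if it fits under
  // nTarget, otherwise accept it with probability exp(-a * overshoot); the
  // map slot is written before the index is advanced, as in revision 708.
  int main() {
    int nTarget = 10, old_atoms = 9, add_atoms = 3, which_proc = 0, atomIndex = 0;
    double a = 1.0;                                   // penalty / (avg atoms per molecule)
    int new_atoms = old_atoms + add_atoms;
    double x = (double)(new_atoms - nTarget);         // overshoot past the target load
    double y = (double)std::rand() / RAND_MAX;        // stand-in for randomSPRNG
    std::vector<int> AtomToProcMap(add_atoms, -1);
    if (new_atoms <= nTarget || y < std::exp(-a * x)) {
      for (int j = 0; j < add_atoms; j++) {
        AtomToProcMap[atomIndex] = which_proc;        // fill the current slot first
        atomIndex++;                                  // then advance the counter
      }
      std::printf("accepted (Pacc = %.3f)\n", std::exp(-a * x));
    } else {
      std::printf("rejected; try another processor\n");
    }
    return 0;
  }
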
# Line 209 | Line 203 | int* mpiSimulation::divideLabor( void ){
203  
204      // Spray out this nonsense to all other processors:
205  
206 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
207 <                          MPI_INT, 0);
206 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
207 >              MPI_INT, 0, MPI_COMM_WORLD);
208  
209 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
210 <                          MPI_INT, 0);
209 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
210 >              MPI_INT, 0, MPI_COMM_WORLD);
211  
212 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
213 <                          MPI_INT, 0);
212 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
213 >              MPI_INT, 0, MPI_COMM_WORLD);
214  
215 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
216 <                          MPI_INT, 0);    
215 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
216 >              MPI_INT, 0, MPI_COMM_WORLD);    
217    } else {
218  
219      // Listen to your marching orders from processor 0:
220      
221 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
222 <                          MPI_INT, 0);
221 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
222 >              MPI_INT, 0, MPI_COMM_WORLD);
223      
224 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
225 <                          MPI_INT, 0);
224 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
225 >              MPI_INT, 0, MPI_COMM_WORLD);
226  
227 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
228 <                          MPI_INT, 0);
227 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
228 >              MPI_INT, 0, MPI_COMM_WORLD);
229      
230 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
231 <                          MPI_INT, 0);
230 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
231 >              MPI_INT, 0, MPI_COMM_WORLD);
232 >
233 >
234    }
235  
236  
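
The broadcast hunk above is a mechanical translation from the C++ bindings to MPI_Bcast: the C call takes the buffer, count, datatype, root rank, and communicator, and must be executed by every rank, with rank 0's copy of each map being the one that is distributed. Below is a self-contained sketch of the same call pattern; it is not OOPSE code and the array contents are invented.

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    int map[4] = {0, 0, 0, 0};
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) { map[0] = 1; map[1] = 0; map[2] = 1; map[3] = 0; }
    MPI_Bcast(map, 4, MPI_INT, 0, MPI_COMM_WORLD);   // buffer, count, type, root, comm
    std::printf("rank %d sees map[0] = %d\n", rank, map[0]);
    MPI_Finalize();
    return 0;
  }
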
# Line 254 | Line 250 | int* mpiSimulation::divideLabor( void ){
250      }
251    }
252  
253 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
254 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
253 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
254 >                MPI_COMM_WORLD);
255 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
256 >                MPI_SUM, MPI_COMM_WORLD);
257    
258    if( nmol_global != entryPlug->n_mol ){
259      sprintf( painCave.errMsg,
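
The same binding change applies to the consistency check above: MPI_Allreduce sums the per-node molecule and atom counts over MPI_COMM_WORLD and leaves the total on every rank, so each node can compare it against the global counts held in entryPlug. A minimal sketch of that pattern with invented numbers (not OOPSE code):

  #include <mpi.h>
  #include <cstdio>

  int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int nmol_local = 5, nmol_global = 0, nprocs = 0;
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);
    int expected = 5 * nprocs;            // stand-in for entryPlug->n_mol
    MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    if (nmol_global != expected)
      std::printf("molecule count mismatch: %d vs %d\n", nmol_global, expected);
    MPI_Finalize();
    return 0;
  }
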
# Line 286 | Line 284 | int* mpiSimulation::divideLabor( void ){
284    local_index = 0;
285    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
286      if (AtomToProcMap[i] == mpiPlug->myNode) {
289      local_index++;
287        globalIndex[local_index] = i;
288 +      local_index++;
289      }
290    }
291 <
291 >  
292    return globalIndex;
293   }
294  
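
In the hunk above, the loop that builds globalIndex now stores the atom's global index before incrementing local_index, so the atoms owned by this node occupy slots 0 through myNlocal-1 of the array. A small stand-alone illustration of that ordering (not OOPSE code, data invented):

  #include <vector>
  #include <cstdio>

  int main() {
    std::vector<int> AtomToProcMap = {0, 1, 0, 1, 0};
    int myNode = 0, local_index = 0;
    std::vector<int> globalIndex(AtomToProcMap.size());
    for (int i = 0; i < (int)AtomToProcMap.size(); i++) {
      if (AtomToProcMap[i] == myNode) {
        globalIndex[local_index] = i;   // fill slot local_index first
        local_index++;                  // then advance
      }
    }
    std::printf("node 0 owns %d atoms; first global index = %d\n",
                local_index, globalIndex[0]);
    return 0;
  }
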
# Line 300 | Line 298 | void mpiSimulation::mpiRefresh( void ){
298    int isError, i;
299    int *globalIndex = new int[mpiPlug->myNlocal];
300  
301 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
301 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
302 >  // not barf
303  
304 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
305 +
306    
307    isError = 0;
308    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
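
mpiRefresh now adds one to every global index before handing the array to the Fortran side via setFsimParallel, since the C++ arrays are 0-based while Fortran arrays are 1-based by default. The compile-only sketch below shows that conversion; the helper name is hypothetical and not part of OOPSE.

  #include <vector>

  // Hypothetical helper: shift 0-based C++ indices to the 1-based
  // convention expected on the Fortran side of the interface.
  std::vector<int> toFortranIndices(const std::vector<int>& cIndices) {
    std::vector<int> fIndices(cIndices.size());
    for (int i = 0; i < (int)cIndices.size(); i++)
      fIndices[i] = cIndices[i] + 1;    // 0-based -> 1-based
    return fIndices;
  }
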

Diff Legend

  Removed lines (no marker)
+ Added lines
< Changed lines (as in revision 418)
> Changed lines (as in revision 708)