root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp

Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 406 by chuckv, Wed Mar 26 19:34:49 2003 UTC vs.
Revision 1129 by tim, Thu Apr 22 03:29:30 2004 UTC

# Line 1 | Line 1
1   #ifdef IS_MPI
2 <
3 < #include <cstdlib>
4 < #include <cstring>
2 > #include <iostream>
3 > #include <stdlib.h>
4 > #include <string.h>
5 > #include <math.h>
6   #include <mpi.h>
6 #include <mpi++.h>
7  
8   #include "mpiSimulation.hpp"
9   #include "simError.h"
10   #include "fortranWrappers.hpp"
11   #include "randomSPRNG.hpp"
12  
13
13   mpiSimulation* mpiSim;
14  
15   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
# Line 18 | Line 17 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17    entryPlug = the_entryPlug;
18    mpiPlug = new mpiSimData;
19    
20 <  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 >  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
21    mpiPlug->myNode = worldRank;
22  
23    MolToProcMap = new int[entryPlug->n_mol];
24    MolComponentType = new int[entryPlug->n_mol];
26
25    AtomToProcMap = new int[entryPlug->n_atoms];
26  
27    mpiSim = this;
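
Most of this revision swaps the MPI-2 C++ bindings (the MPI:: objects pulled in through <mpi++.h>) for the plain C bindings from <mpi.h>. A minimal sketch of the two calling conventions for querying the communicator size, not taken from OOPSE, just to show the pattern:

    #include <mpi.h>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      // C++ bindings (old revision):  int size = MPI::COMM_WORLD.Get_size();
      // C bindings (new revision): the communicator is passed explicitly and
      // the result comes back through an out-parameter.
      int size;
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      MPI_Finalize();
      return 0;
    }
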
# Line 33 | Line 31 | mpiSimulation::~mpiSimulation(){
31  
32   mpiSimulation::~mpiSimulation(){
33    
34 +  delete[] MolToProcMap;
35 +  delete[] MolComponentType;
36 +  delete[] AtomToProcMap;
37 +
38    delete mpiPlug;
39    // perhaps we should let fortran know the party is over.
40    
41   }
42  
43 < int* mpiSimulation::divideLabor( void ){
43 > void mpiSimulation::divideLabor( ){
44  
43  int* globalIndex;
44
45    int nComponents;
46    MoleculeStamp** compStamps;
47 <  randomSPRNG myRandom;
47 >  randomSPRNG *myRandom;
48    int* componentsNmol;
49    int* AtomsPerProc;
50  
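
The destructor now pairs the new int[...] allocations made in the constructor with delete[]; plain delete on an array is undefined behavior, and omitting the calls leaked the maps. A tiny sketch of the pattern (the name and count are placeholders):

    void example(int nMol) {
      int* MolToProcMap = new int[nMol];   // allocated with new[], as in the constructor
      // ... use the map ...
      delete[] MolToProcMap;               // released with delete[], as in the destructor
    }
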
# Line 55 | Line 55 | int* mpiSimulation::divideLabor( void ){
55    int old_atoms, add_atoms, new_atoms;
56  
57    int nTarget;
58 <  int molIndex, atomIndex, compIndex, compStart;
58 >  int molIndex, atomIndex;
59    int done;
60 <  int nLocal, molLocal;
61 <  int i, index;
62 <  int smallDiff, bigDiff;
60 >  int i, j, loops, which_proc, nmol_local, natoms_local;
61 >  int nmol_global, natoms_global;
62 >  int local_index;
63 >  int baseSeed = entryPlug->getSeed();
64  
64  int testSum;
65
65    nComponents = entryPlug->nComponents;
66    compStamps = entryPlug->compStamps;
67    componentsNmol = entryPlug->componentsNmol;
# Line 75 | Line 74 | int* mpiSimulation::divideLabor( void ){
74    mpiPlug->nSRIGlobal = entryPlug->n_SRI;
75    mpiPlug->nMolGlobal = entryPlug->n_mol;
76  
77 <  myRandom = new randomSPRNG();
77 >  myRandom = new randomSPRNG( baseSeed );
78  
79 <  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
79 >  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
80  
81    // Initialize things that we'll send out later:
82    for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
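
The decay constant a now carries a penalty factor of 3.0: a = 3.0 * nMolGlobal / nAtomsGlobal, i.e. penalty / (average atoms per molecule). As an illustrative example (numbers assumed, not from the source): with 300 molecules and 900 atoms the average molecule has 3 atoms, so a = 3.0 * 300 / 900 = 1.0, and a placement that overshoots the per-processor atom target by x atoms is later accepted with probability exp(-a * x).
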
# Line 120 | Line 119 | int* mpiSimulation::divideLabor( void ){
119          
120          // Pick a processor at random
121  
122 <        which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
122 >        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
123  
124          // How many atoms does this processor have?
125          
126          old_atoms = AtomsPerProc[which_proc];
127 <
129 <        // If the processor already had too many atoms, just skip this
130 <        // processor and try again.
131 <
132 <        if (old_atoms >= nTarget) continue;
133 <
134 <        add_atoms = compStamps[MolComponentType[i]]->getNatoms();
127 >        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
128          new_atoms = old_atoms + add_atoms;
136    
137        // If we can add this molecule to this processor without sending
138        // it above nTarget, then go ahead and do it:
139    
140        if (new_atoms <= nTarget) {
141          MolToProcMap[i] = which_proc;
142          AtomsPerProc[which_proc] += add_atoms;
143          for (j = 0 ; j < add_atoms; j++ ) {
144            atomIndex++;
145            AtomToProcMap[atomIndex] = which_proc;
146          }
147          done = 1;
148          continue;
149        }
129  
130          // If we've been through this loop too many times, we need
131          // to just give up and assign the molecule to this processor
# Line 164 | Line 143 | int* mpiSimulation::divideLabor( void ){
143            MolToProcMap[i] = which_proc;
144            AtomsPerProc[which_proc] += add_atoms;
145            for (j = 0 ; j < add_atoms; j++ ) {
146 <            atomIndex++;
147 <            AtomToProcMap[atomIndex] = which_proc;
146 >            AtomToProcMap[atomIndex] = which_proc;
147 >            atomIndex++;
148            }
149            done = 1;
150            continue;
151          }
152 +    
153 +        // If we can add this molecule to this processor without sending
154 +        // it above nTarget, then go ahead and do it:
155 +    
156 +        if (new_atoms <= nTarget) {
157 +          MolToProcMap[i] = which_proc;
158 +          AtomsPerProc[which_proc] += add_atoms;
159 +          for (j = 0 ; j < add_atoms; j++ ) {
160 +            AtomToProcMap[atomIndex] = which_proc;
161 +            atomIndex++;
162 +          }
163 +          done = 1;
164 +          continue;
165 +        }
166  
174        // The only situation left is where old_atoms < nTarget, but
175        // new_atoms > nTarget.   We want to accept this with some
176        // probability that dies off the farther we are from nTarget
167  
168 +        // The only situation left is when new_atoms > nTarget.  We
169 +        // want to accept this with some probability that dies off the
170 +        // farther we are from nTarget
171 +
172          // roughly:  x = new_atoms - nTarget
173          //           Pacc(x) = exp(- a * x)
174 <        // where a = 1 / (average atoms per molecule)
174 >        // where a = penalty / (average atoms per molecule)
175  
176          x = (double) (new_atoms - nTarget);
177 <        y = myRandom.getRandom();
178 <        
179 <        if (exp(- a * x) > y) {
177 >        y = myRandom->getRandom();
178 >      
179 >        if (y < exp(- a * x)) {
180            MolToProcMap[i] = which_proc;
181            AtomsPerProc[which_proc] += add_atoms;
182            for (j = 0 ; j < add_atoms; j++ ) {
183 <            atomIndex++;
184 <            AtomToProcMap[atomIndex] = which_proc;
185 <          }
183 >            AtomToProcMap[atomIndex] = which_proc;
184 >            atomIndex++;
185 >           }
186            done = 1;
187            continue;
188          } else {
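
The accept/reject step above is a Metropolis-like test: the overshoot x = new_atoms - nTarget is accepted with probability Pacc(x) = exp(-a * x). A minimal stand-alone sketch of that test, with std::rand() standing in for the randomSPRNG generator the real code uses:

    #include <cstdlib>
    #include <cmath>

    // Accept an assignment that overshoots the per-processor atom target by
    // (new_atoms - nTarget) atoms with probability exp(-a * overshoot).
    bool acceptOvershoot(int new_atoms, int nTarget, double a) {
      double x = static_cast<double>(new_atoms - nTarget);
      double y = static_cast<double>(std::rand()) / RAND_MAX;  // stand-in for myRandom->getRandom()
      return y < std::exp(-a * x);
    }
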
# Line 200 | Line 194 | int* mpiSimulation::divideLabor( void ){
194  
195      // Spray out this nonsense to all other processors:
196  
197 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
198 <                          MPI_INT, 0);
197 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
198 >              MPI_INT, 0, MPI_COMM_WORLD);
199  
200 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
201 <                          MPI_INT, 0);
200 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
201 >              MPI_INT, 0, MPI_COMM_WORLD);
202  
203 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
204 <                          MPI_INT, 0);
203 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
204 >              MPI_INT, 0, MPI_COMM_WORLD);
205  
206 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
207 <                          MPI_INT, 0);    
206 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
207 >              MPI_INT, 0, MPI_COMM_WORLD);    
208    } else {
209  
210      // Listen to your marching orders from processor 0:
211      
212 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
213 <                          MPI_INT, 0);
212 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
213 >              MPI_INT, 0, MPI_COMM_WORLD);
214      
215 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
216 <                          MPI_INT, 0);
215 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
216 >              MPI_INT, 0, MPI_COMM_WORLD);
217  
218 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
219 <                          MPI_INT, 0);
218 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
219 >              MPI_INT, 0, MPI_COMM_WORLD);
220      
221 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
222 <                          MPI_INT, 0);
221 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
222 >              MPI_INT, 0, MPI_COMM_WORLD);
223 >
224 >
225    }
226  
227  
# Line 245 | Line 241 | int* mpiSimulation::divideLabor( void ){
241      }
242    }
243  
244 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
245 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
244 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
245 >                MPI_COMM_WORLD);
246 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
247 >                MPI_SUM, MPI_COMM_WORLD);
248    
249    if( nmol_global != entryPlug->n_mol ){
250      sprintf( painCave.errMsg,
# Line 273 | Line 271 | int* mpiSimulation::divideLabor( void ){
271    mpiPlug->myNMol = nmol_local;
272    mpiPlug->myNlocal = natoms_local;
273  
274 <  globalIndex = new int[mpiPlug->myNlocal];
274 >  globalAtomIndex.resize(mpiPlug->myNlocal);
275 >  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
276    local_index = 0;
277    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
278      if (AtomToProcMap[i] == mpiPlug->myNode) {
279 +      globalAtomIndex[local_index] = i;
280 +
281 +      globalToLocalAtom[i] = local_index;
282        local_index++;
283 <      globalIndex[local_index] = i;
283 >      
284      }
285 +    else
286 +       globalToLocalAtom[i] = -1;
287    }
284  
288  
289 <
290 <
291 <   index = mpiPlug->myAtomStart;
292 < //   for( i=0; i<mpiPlug->myNlocal; i++){
293 < //     globalIndex[i] = index;
294 < //     index++;
295 < //   }
296 <
297 < //   return globalIndex;
289 >  globalMolIndex.resize(mpiPlug->myNMol);
290 >  globalToLocalMol.resize(mpiPlug->nMolGlobal);
291 >  
292 >  local_index = 0;
293 >  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
294 >    if (MolToProcMap[i] == mpiPlug->myNode) {
295 >      globalMolIndex[local_index] = i;
296 >      globalToLocalMol[i] = local_index;
297 >      local_index++;
298 >    }
299 >    else
300 >      globalToLocalMol[i] = -1;
301 >  }
302 >  
303   }
304  
305  
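
The returned globalIndex array is gone; the revision instead fills member containers that map in both directions, with -1 marking atoms (and molecules) owned by other processors. A small sketch of the same pattern over std::vector, assuming AtomToProcMap has already been broadcast:

    #include <vector>

    void buildAtomMaps(const std::vector<int>& AtomToProcMap, int myNode,
                       std::vector<int>& globalAtomIndex,
                       std::vector<int>& globalToLocalAtom) {
      globalToLocalAtom.assign(AtomToProcMap.size(), -1);   // -1: atom lives on another node
      globalAtomIndex.clear();
      for (int i = 0; i < static_cast<int>(AtomToProcMap.size()); i++) {
        if (AtomToProcMap[i] == myNode) {
          globalToLocalAtom[i] = static_cast<int>(globalAtomIndex.size());  // local index
          globalAtomIndex.push_back(i);                                     // global index
        }
      }
    }
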
# Line 300 | Line 308 | void mpiSimulation::mpiRefresh( void ){
308    int isError, i;
309    int *globalIndex = new int[mpiPlug->myNlocal];
310  
311 <  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
311 >  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
312 >  // not barf
313  
314 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
315 +
316    
317    isError = 0;
318    setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );

Diff Legend

  Removed lines (no marker)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)