Comparing trunk/OOPSE/libmdtools/mpiSimulation.cpp (file contents):
Revision 406 by chuckv, Wed Mar 26 19:34:49 2003 UTC vs.
Revision 422 by mmeineke, Thu Mar 27 19:21:42 2003 UTC

# Line 2 | Line 2
2  
3   #include <cstdlib>
4   #include <cstring>
5 + #include <cmath>
6   #include <mpi.h>
7   #include <mpi++.h>
8  
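
The new <cmath> include supplies exp(), which the probabilistic load-balancing test added later in this revision calls; without it the declaration may be missing on compilers that don't pull the C math header in transitively. A minimal standalone check (not OOPSE code):

    #include <cmath>
    #include <cstdio>

    int main() {
      // exp() comes from <cmath>; this mirrors the acceptance
      // expression used later in divideLabor().
      std::printf("%f\n", std::exp(-0.5 * 2.0));  // ~0.368
      return 0;
    }
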
# Line 10 | Line 11
11   #include "fortranWrappers.hpp"
12   #include "randomSPRNG.hpp"
13  
14 + #define BASE_SEED 123456789
15  
16   mpiSimulation* mpiSim;
17  
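
BASE_SEED gives every run the same reproducible starting point for the SPRNG stream. A hedged sketch of the pattern, using a hypothetical stand-in generator since randomSPRNG's internals are not shown in this diff:

    #include <cstdlib>

    #define BASE_SEED 123456789

    // Hypothetical stand-in for randomSPRNG: seeded once from the shared
    // base seed, then queried for uniform deviates in [0, 1).
    struct toyRandom {
      explicit toyRandom(int baseSeed) {
        std::srand(static_cast<unsigned>(baseSeed));
      }
      double getRandom() { return std::rand() / (RAND_MAX + 1.0); }
    };

    int main() {
      toyRandom myRandom(BASE_SEED);   // same seed -> same stream each run
      return myRandom.getRandom() < 1.0 ? 0 : 1;
    }
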
# Line 23 | Line 25 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
25  
26    MolToProcMap = new int[entryPlug->n_mol];
27    MolComponentType = new int[entryPlug->n_mol];
26
28    AtomToProcMap = new int[entryPlug->n_atoms];
29  
30    mpiSim = this;
# Line 33 | Line 34 | mpiSimulation::~mpiSimulation(){
34  
35   mpiSimulation::~mpiSimulation(){
36    
37 +  delete[] MolToProcMap;
38 +  delete[] MolComponentType;
39 +  delete[] AtomToProcMap;
40 +
41    delete mpiPlug;
42    // perhaps we should let fortran know the party is over.
43    
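
These delete[] calls fix a leak: the constructor allocates the three maps with array new[], so the destructor must release them with delete[] (plain delete on an array is undefined behavior). The pairing in miniature:

    // Sketch of the new[] / delete[] pairing used by mpiSimulation.
    struct mapsOwner {
      int* molToProc;
      explicit mapsOwner(int nMol) : molToProc(new int[nMol]) {}
      ~mapsOwner() { delete[] molToProc; }  // delete[] matches new[]
    };

    int main() {
      mapsOwner maps(100);   // allocated here, freed when maps leaves scope
      maps.molToProc[0] = 0;
      return 0;
    }
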
# Line 44 | Line 49 | int* mpiSimulation::divideLabor( void ){
49  
50    int nComponents;
51    MoleculeStamp** compStamps;
52 <  randomSPRNG myRandom;
52 >  randomSPRNG *myRandom;
53    int* componentsNmol;
54    int* AtomsPerProc;
55  
# Line 58 | Line 63 | int* mpiSimulation::divideLabor( void ){
63    int molIndex, atomIndex, compIndex, compStart;
64    int done;
65    int nLocal, molLocal;
66 <  int i, index;
66 >  int i, j, loops, which_proc, nmol_local, natoms_local;
67 >  int nmol_global, natoms_global;
68 >  int local_index, index;
69    int smallDiff, bigDiff;
70 +  int baseSeed = BASE_SEED;
71  
72    int testSum;
73  
# Line 75 | Line 83 | int* mpiSimulation::divideLabor( void ){
83    mpiPlug->nSRIGlobal = entryPlug->n_SRI;
84    mpiPlug->nMolGlobal = entryPlug->n_mol;
85  
86 <  myRandom = new randomSPRNG();
86 >  myRandom = new randomSPRNG( baseSeed );
87  
88    a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
89  
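
The generator is now constructed with the explicit seed (hence the pointer declaration above and the switch from . to -> below), and a is set to nMolGlobal / nAtomsGlobal, the reciprocal of the average atoms per molecule; it becomes the decay constant of the acceptance test further down. A worked check with made-up counts:

    #include <cmath>
    #include <cstdio>

    int main() {
      // Assumed example counts: 1000 molecules, 3000 atoms total.
      double a = 1000.0 / 3000.0;   // 1 / (average atoms per molecule)
      // Overshooting the per-processor target by one average-sized
      // molecule (x = 3 atoms) is accepted with probability exp(-1):
      std::printf("P(accept) = %.3f\n", std::exp(-a * 3.0));  // ~0.368
      return 0;
    }
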
# Line 120 | Line 128 | int* mpiSimulation::divideLabor( void ){
128          
129          // Pick a processor at random
130  
131 <        which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
131 >        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
132  
133          // How many atoms does this processor have?
134          
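
The pick scales a uniform deviate from [0, 1) up to the processor count and truncates, giving each rank equal probability; this assumes getRandom() never returns exactly 1.0, which would index one past the last processor. The mapping in isolation (the generator is a hypothetical stand-in):

    #include <cstdlib>

    // Hypothetical uniform generator in [0, 1), standing in for
    // myRandom->getRandom().
    static double uniform01() { return std::rand() / (RAND_MAX + 1.0); }

    // Truncation maps [k/N, (k+1)/N) to processor k, so each of the
    // N ranks is chosen with equal probability.
    static int pickProcessor(int numberProcessors) {
      return (int)(uniform01() * numberProcessors);
    }

    int main() {
      int which_proc = pickProcessor(4);
      return (which_proc >= 0 && which_proc < 4) ? 0 : 1;
    }
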
# Line 131 | Line 139 | int* mpiSimulation::divideLabor( void ){
139  
140          if (old_atoms >= nTarget) continue;
141  
142 <        add_atoms = compStamps[MolComponentType[i]]->getNatoms();
142 >        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
143          new_atoms = old_atoms + add_atoms;
144      
145          // If we can add this molecule to this processor without sending
# Line 141 | Line 149 | int* mpiSimulation::divideLabor( void ){
149            MolToProcMap[i] = which_proc;
150            AtomsPerProc[which_proc] += add_atoms;
151            for (j = 0 ; j < add_atoms; j++ ) {
152 <            atomIndex++;
153 <            AtomToProcMap[atomIndex] = which_proc;
152 >            AtomToProcMap[atomIndex] = which_proc;
153 >            atomIndex++;
154            }
155            done = 1;
156            continue;
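
Swapping the two statements is an off-by-one fix: assuming atomIndex starts at 0 (its initialization is outside this hunk), the old increment-then-assign order left AtomToProcMap[0] unwritten and wrote one slot past the last atom at the end. The corrected fill pattern as a standalone helper (the signature is illustrative, not from the source):

    // Write the current slot first, then advance: on entry and on exit,
    // atomIndex names the next unassigned atom.
    static void assignAtoms(int* AtomToProcMap, int& atomIndex,
                            int add_atoms, int which_proc) {
      for (int j = 0; j < add_atoms; j++) {
        AtomToProcMap[atomIndex] = which_proc;
        atomIndex++;
      }
    }

    int main() {
      int map[4];
      int atomIndex = 0;
      assignAtoms(map, atomIndex, 4, 2);  // fills map[0..3], not map[1..4]
      return (atomIndex == 4 && map[0] == 2) ? 0 : 1;
    }
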
# Line 164 | Line 172 | int* mpiSimulation::divideLabor( void ){
172            MolToProcMap[i] = which_proc;
173            AtomsPerProc[which_proc] += add_atoms;
174            for (j = 0 ; j < add_atoms; j++ ) {
175 <            atomIndex++;
176 <            AtomToProcMap[atomIndex] = which_proc;
175 >            AtomToProcMap[atomIndex] = which_proc;
176 >            atomIndex++;
177            }
178            done = 1;
179            continue;
# Line 180 | Line 188 | int* mpiSimulation::divideLabor( void ){
188          // where a = 1 / (average atoms per molecule)
189  
190          x = (double) (new_atoms - nTarget);
191 <        y = myRandom.getRandom();
191 >        y = myRandom->getRandom();
192          
193          if (exp(- a * x) > y) {
194            MolToProcMap[i] = which_proc;
195            AtomsPerProc[which_proc] += add_atoms;
196            for (j = 0 ; j < add_atoms; j++ ) {
197 <            atomIndex++;
198 <            AtomToProcMap[atomIndex] = which_proc;
199 <          }
197 >            AtomToProcMap[atomIndex] = which_proc;
198 >            atomIndex++;
199 >          }
200            done = 1;
201            continue;
202          } else {
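
This block handles the interesting case: the chosen processor is under its quota (old_atoms < nTarget, or the earlier continue would have fired) but adding the molecule would overshoot it by x atoms. Rather than refusing outright, the revision accepts with probability exp(-a x), a Metropolis-style criterion in which a = 1/(average atoms per molecule) sets the tolerance scale, so small overshoots usually pass and large ones rarely do. The same rule in isolation:

    #include <cmath>
    #include <cstdlib>

    // Accept an overshoot of x atoms with probability exp(-a * x); a is
    // the reciprocal of the average molecule size, so x is effectively
    // measured in units of one average molecule.
    static bool acceptOverload(double a, double x) {
      double y = std::rand() / (RAND_MAX + 1.0);  // uniform in [0, 1)
      return std::exp(-a * x) > y;
    }

    int main() {
      // An overshoot of zero atoms is always accepted: exp(0) = 1 > y.
      return acceptOverload(1.0 / 3.0, 0.0) ? 0 : 1;
    }
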
# Line 200 | Line 208 | int* mpiSimulation::divideLabor( void ){
208  
209      // Spray out this nonsense to all other processors:
210  
211 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
211 >    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
212                            MPI_INT, 0);
213  
214 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
214 >    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
215                            MPI_INT, 0);
216  
217 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
217 >    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
218                            MPI_INT, 0);
219  
220 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
220 >    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
221                            MPI_INT, 0);    
222    } else {
223  
224      // Listen to your marching orders from processor 0:
225      
226 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
226 >    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
227                            MPI_INT, 0);
228      
229 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
229 >    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
230                            MPI_INT, 0);
231  
232 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
232 >    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
233                            MPI_INT, 0);
234      
235 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
235 >    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
236                            MPI_INT, 0);
237    }
238  
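
Dropping the & from every buffer argument is the substantive fix in this hunk: MolToProcMap and the other maps are already of type int*, so &MolToProcMap handed MPI the address of the pointer variable itself, and the broadcast would have read or written right over whatever memory followed it instead of filling the array. Both branches issue identical calls because Bcast is collective: rank 0's data is sent and every other rank receives in place. With the C++ bindings used here, the call takes the array directly:

    #include <mpi.h>

    // Rank 0's copy of buf is replicated into every other rank's buf.
    // Pass the array itself (an int*), never the address of the pointer.
    static void shareMap(int* buf, int count) {
      MPI::COMM_WORLD.Bcast(buf, count, MPI_INT, 0);
    }

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      int map[3] = { 0, 1, 2 };   // meaningful only on rank 0
      shareMap(map, 3);           // identical call on every rank
      MPI::Finalize();
      return 0;
    }
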
# Line 277 | Line 285 | int* mpiSimulation::divideLabor( void ){
285    local_index = 0;
286    for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
287      if (AtomToProcMap[i] == mpiPlug->myNode) {
280      local_index++;
288        globalIndex[local_index] = i;
289 +      local_index++;
290      }
291    }
284 <  
285 <
286 <
287 <
288 <   index = mpiPlug->myAtomStart;
289 < //   for( i=0; i<mpiPlug->myNlocal; i++){
290 < //     globalIndex[i] = index;
291 < //     index++;
292 < //   }
293 <
294 < //   return globalIndex;
292 >
293 >  return globalIndex;
294   }
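
Two things change at the end of divideLabor(): the local-to-global table is now filled write-then-increment (the same off-by-one fix as in the assignment loops above), and the previously commented-out return statement is restored so callers actually receive the table. The construction in isolation (a sketch; names follow the source, and sizing the result by myNlocal is an assumption):

    // Build this rank's local -> global atom index table.
    static int* buildGlobalIndex(const int* AtomToProcMap, int nAtomsGlobal,
                                 int myNode, int myNlocal) {
      int* globalIndex = new int[myNlocal];
      int local_index = 0;
      for (int i = 0; i < nAtomsGlobal; i++) {
        if (AtomToProcMap[i] == myNode) {
          globalIndex[local_index] = i;  // global id of this local atom
          local_index++;                 // advance after writing
        }
      }
      return globalIndex;
    }
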
295  
296  

Diff Legend

  (no marker)  Removed lines
  +            Added lines
  <            Changed lines (old revision)
  >            Changed lines (new revision)