
Comparing trunk/mdtools/mpi_implementation/mpiSimulation.cpp (file contents):
Revision 200 by mmeineke, Mon Dec 9 20:54:42 2002 UTC vs.
Revision 253 by chuckv, Thu Jan 30 15:20:21 2003 UTC

# Line 1 | Line 1
1 < i#include <cstdlib>
1 > #include <cstdlib>
2   #include <cstring>
3   #include <mpi.h>
4  
5   #include "mpiSimulation.hpp"
6   #include "simError.h"
7  
8 + extern "C"{
9 +  void wrapsimparallelmod_( void (*wrapFunction)(void (*fSub)( mpiSimData*,
10 +                                                               int*, int*,
11 +                                                               int*)));
12 + }
13  
14 + void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*));
15  
16 +
17 + mpiSimulation* mpiSim;
18 +
19   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
20   {
21    entryPlug = the_entryPlug;
22 +  mpiPlug = new mpiSimData;
23    
24 <  numberProcessors = MPI::COMM_WORLD.Get_size();
25 <  myNode = worldRank;
24 >  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
25 >  mpiPlug->myNode = worldRank;
26    
27 <  // let the simulation know were there.
28 <  entryPlug->mpiSim = this;
27 >  mpiSim = this;
28 >  wrapMe();
29 >
30   }
31  
32  
33   mpiSimulation::~mpiSimulation(){
34    
35 <  // empty for now
35 >  delete mpiPlug;
36 >  // perhaps we should let fortran know the party is over.
37    
38   }
39  
40 + void mpiSimulation::wrapMe(){
41  
42 < void mpiSimulation::divideLabor( void ){
42 >  wrapsimparallelmod_( wrapSimParallel );
43 > }
44  
45 +
46 +
47 + int* mpiSimulation::divideLabor( void ){
48 +
49 +  int* globalIndex;
50 +
51    int nComponents;
52    MoleculeStamp** compStamps;
53    int* componentsNmol;
# Line 40 | Line 60 | void mpiSimulation::divideLabor( void ){
60    int molIndex, atomIndex, compIndex, compStart;
61    int done;
62    int nLocal, molLocal;
63 <  int i;
63 >  int i, index;
64    int smallDiff, bigDiff;
65  
66    int testSum;
# Line 49 | Line 69 | void mpiSimulation::divideLabor( void ){
69    compStamps = entryPlug->compStamps;
70    componentsNmol = entryPlug->componentsNmol;
71  
72 +  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
73 +  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
74 +  mpiPlug->nBendsGlobal = entryPlug->n_bends;
75 +  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
76 +  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
77 +  mpiPlug->nMolGlobal = entryPlug->n_mol;
78 +
79    numerator = (double) entryPlug->n_atoms;
80 <  denominator = (double) numberProcessors;
80 >  denominator = (double) mpiPlug->numberProcessors;
81    precast = numerator / denominator;
82    nTarget = (int)( precast + 0.5 );
83    
# Line 58 | Line 85 | void mpiSimulation::divideLabor( void ){
85    atomIndex = 0;
86    compIndex = 0;
87    compStart = 0;
88 <  for( i=0; i<(numberProcessors-1); i++){
88 >  for( i=0; i<(mpiPlug->numberProcessors-1); i++){
89      
90      done = 0;
91      nLocal = 0;
92      molLocal = 0;
93  
94 <    if( i == myNode ){
95 <      myMolStart = molIndex;
96 <      myAtomStart = atomIndex;
94 >    if( i == mpiPlug->myNode ){
95 >      mpiPlug->myMolStart = molIndex;
96 >      mpiPlug->myAtomStart = atomIndex;
97      }
98      
99      while( !done ){
# Line 101 | Line 128 | void mpiSimulation::divideLabor( void ){
128        }
129      }
130      
131 <    if( i == myNode ){
132 <      myMolEnd = (molIndex - 1);
133 <      myAtomEnd = (atomIndex - 1);
134 <      myNlocal = nLocal;
135 <      myMol = molLocal;
131 >    if( i == mpiPlug->myNode ){
132 >      mpiPlug->myMolEnd = (molIndex - 1);
133 >      mpiPlug->myAtomEnd = (atomIndex - 1);
134 >      mpiPlug->myNlocal = nLocal;
135 >      mpiPlug->myMol = molLocal;
136      }
137      
138      numerator = (double)( entryPlug->n_atoms - atomIndex );
139 <    denominator = (double)( numberProcessors - (i+1) );
139 >    denominator = (double)( mpiPlug->numberProcessors - (i+1) );
140      precast = numerator / denominator;
141      nTarget = (int)( precast + 0.5 );
142    }
143    
144 <  if( myNode == numberProcessors-1 ){
145 <      myMolStart = molIndex;
146 <      myAtomStart = atomIndex;
144 >  if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){
145 >      mpiPlug->myMolStart = molIndex;
146 >      mpiPlug->myAtomStart = atomIndex;
147  
148        nLocal = 0;
149        molLocal = 0;
# Line 134 | Line 161 | void mpiSimulation::divideLabor( void ){
161          molLocal++;
162        }
163        
164 <      myMolEnd = (molIndex - 1);
165 <      myAtomEnd = (atomIndex - 1);
166 <      myNlocal = nLocal;  
167 <      myMol = molLocal;
164 >      mpiPlug->myMolEnd = (molIndex - 1);
165 >      mpiPlug->myAtomEnd = (atomIndex - 1);
166 >      mpiPlug->myNlocal = nLocal;  
167 >      mpiPlug->myMol = molLocal;
168    }
169  
170  
171 <  MPI_Allreduce( &Nlocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
171 >  MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );
172    
173 <  if( myNode == 0 ){
173 >  if( mpiPlug->myNode == 0 ){
174      if( testSum != entryPlug->n_atoms ){
175        sprintf( painCave.errMsg,
176              "The sum of all nLocals, %d, did not equal the total number of atoms, %d.\n",
# Line 158 | Line 185 | void mpiSimulation::divideLabor( void ){
185    MPIcheckPoint();
186  
187   // let's create the identity array
188 +
189 +  globalIndex = new int[mpiPlug->myNlocal];
190 +  index = mpiPlug->myAtomStart;
191 +  for( i=0; i<mpiPlug->myNlocal; i++){
192 +    globalIndex[i] = index;
193 +    index++;
194 +  }
195 +
196 +  return globalIndex;
197   }
198 +
199 +
200 + void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*)){
201 +  
202 +  mpiSim->setInternal( fSub );
203 + }
204 +
205 +
206 + void mpiSimulation::mpiRefresh( void ){
207 +
208 +  int isError, i;
209 +  int *globalIndex = new int[mpiPlug->myNlocal];
210 +
211 +  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
212 +
213 +  
214 +  isError = 0;
215 +  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
216 +  if( isError ){
217 +
218 +    sprintf( painCave.errMsg,
219 +             "mpiRefresh error: fortran didn't like something we gave it.\n" );
220 +    painCave.isFatal = 1;
221 +    simError();
222 +  }
223 +
224 +  delete[] globalIndex;
225 +
226 +  sprintf( checkPointMsg,
227 +           " mpiRefresh successful.\n" );
228 +  MPIcheckPoint();
229 + }
230 +  
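A note on the new wrapper plumbing above: the extern "C" declaration, the global mpiSim pointer, and the free function wrapSimParallel() form a callback-registration handshake with the Fortran side. wrapMe() hands the address of wrapSimParallel() to the Fortran module, which calls back with its own setup routine; the trampoline then stores that routine on the C++ object, where mpiRefresh() later invokes it. A minimal sketch of the pattern, assuming setInternal() simply stores the pointer (its real declaration lives in mpiSimulation.hpp, which is not part of this diff, so the member names below are illustrative):

  struct mpiSimData;   // opaque in this sketch

  // signature of the setup routine the Fortran module hands back
  typedef void (*setFsimFn)( mpiSimData*, int*, int*, int* );

  class mpiSimulation {
  public:
    // assumed behavior: remember the Fortran-side setup routine
    void setInternal( setFsimFn fSub ){ setFsimParallel = fSub; }
  private:
    setFsimFn setFsimParallel;   // later called, e.g. by mpiRefresh()
  };

  mpiSimulation* mpiSim;   // global; the constructor sets it to "this"

  // Fortran can call through a plain function pointer but not through a
  // member function, so this free-function trampoline forwards the call:
  void wrapSimParallel( setFsimFn fSub ){
    mpiSim->setInternal( fSub );
  }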

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)
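A note on the arithmetic in divideLabor(): the per-node atom target is not computed once as n_atoms/numberProcessors; after each node is filled, nTarget is recomputed as round(remaining atoms / remaining processors), so rounding error never accumulates across nodes, and the last node simply takes whatever remains. A standalone sketch of just that arithmetic (the counts are hypothetical, and the real loop assigns whole molecules, so it may overshoot each target slightly):

  #include <cstdio>

  int main(){
    const int nAtoms = 10;   // hypothetical totals
    const int nProcs = 3;
    int assigned = 0;
    for( int node = 0; node < nProcs; node++ ){
      // nTarget = round( remaining atoms / remaining processors )
      double precast = (double)(nAtoms - assigned) / (double)(nProcs - node);
      int nTarget = (int)( precast + 0.5 );
      if( node == nProcs-1 ) nTarget = nAtoms - assigned;  // last node takes the rest
      printf( "node %d gets %d atoms\n", node, nTarget );
      assigned += nTarget;
    }
    return 0;   // 10 atoms over 3 nodes -> 3, 4, 3
  }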