ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/group/trunk/mdtools/md_code/DumpWriter.cpp
(Generate patch)

Comparing trunk/mdtools/md_code/DumpWriter.cpp (file contents):
Revision 249 by chuckv, Mon Jan 27 21:28:19 2003 UTC vs.
Revision 260 by chuckv, Fri Jan 31 21:04:27 2003 UTC

# Line 5 | Line 5
5   #ifdef IS_MPI
6   #include <mpi.h>
7   #include "mpiSimulation.hpp"
8 + #define TAKE_THIS_TAG 0
9   #endif //is_mpi
10  
11   #include "ReadWrite.hpp"
# Line 41 | Line 42 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
42  
43   #ifdef IS_MPI
44    }
45 +
46 +  sprintf( checkPointMsg,
47 +           "Sucessfully opened output file for dumping.\n");
48 +  MPIcheckPoint();
49   #endif // is_mpi
50   }
51  
# Line 60 | Line 65 | void DumpWriter::writeDump( double currentTime ){
65   void DumpWriter::writeDump( double currentTime ){
66    
67    const int BUFFERSIZE = 2000;
68 <  char tempBuffer[500];
68 >  char tempBuffer[BUFFERSIZE];
69    char writeLine[BUFFERSIZE];
70  
71    int i;
# Line 130 | Line 135 | void DumpWriter::writeDump( double currentTime ){
135    // write out header and node 0's coordinates
136  
137    if( worldRank == 0 ){
138 <    outFile << entry_plug->mpiSim->getTotAtoms() << "\n";
138 >    outFile << mpiSim->getTotAtoms() << "\n";
139        
140      outFile << currentTime << "\t"
141              << entry_plug->box_x << "\t"
# Line 170 | Line 175 | void DumpWriter::writeDump( double currentTime ){
175        else
176          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
177          
178 <      outfile << writeLine;
178 >      outFile << writeLine;
179        masterIndex++;
180      }
181      outFile.flush();
182    }
183 +
184 +  sprintf( checkPointMsg,
185 +           "Sucessfully wrote node 0's dump configuration.\n");
186 +  MPIcheckPoint();
187      
188 <  for (procIndex = 1; procIndex < entry_plug->mpiSim->getNumberProcessors();
188 >  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
189         procIndex++){
190  
191      if( worldRank == 0 ){
192 <        
192 >      
193        mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
194 <                        MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
195 <
194 >                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
195 >      
196        mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
197 <                        MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
198 <        
197 >                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
198 >      
199        // Make sure where node 0 is writing to, matches where the
200        // receiving node expects it to be.
201 <        
201 >      
202        if (masterIndex != nodeAtomsStart){
203          sendError = 1;
204 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
204 >        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
205                            MPI_COMM_WORLD);
206          sprintf(painCave.errMsg,
207                  "DumpWriter error: atoms start index (%d) for "
# Line 201 | Line 210 | void DumpWriter::writeDump( double currentTime ){
210          painCave.isFatal = 1;
211          simError();
212        }
213 <        
213 >      
214        sendError = 0;
215 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
215 >      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
216                          MPI_COMM_WORLD);
217 <
217 >      
218        // recieve the nodes writeLines
219 <
220 <      for ( i = nodeAtomStart; i <= nodeAtomEnd, i++){
221 <          
222 <        mpiErr = MPI_Recv(&read_buffer,BUFFERSIZE,MPI_CHAR,procIndex,
223 <                          MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
224 <
219 >      
220 >      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
221 >        
222 >        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
223 >                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
224 >        
225          outFile << writeLine;
226          masterIndex++;
227        }
# Line 220 | Line 229 | void DumpWriter::writeDump( double currentTime ){
229  
230      else if( worldRank == procIndex ){
231  
232 <      nodeAtomStart = entry_plug->mpiSim->getMyAtomStart();
233 <      nodeAtomEnd = entry_plug->mpiSim->getMyAtomEnd();
232 >      nodeAtomsStart = mpiSim->getMyAtomStart();
233 >      nodeAtomsEnd = mpiSim->getMyAtomEnd();
234          
235 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
235 >      fprintf( stderr,
236 >               "node %d: myatomStart-> %d; myatomEnd-> %d\n",
237 >               worldRank, nodeAtomsStart, nodeAtomsEnd );
238 >
239 >      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
240                          MPI_COMM_WORLD);
241 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
241 >      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
242                          MPI_COMM_WORLD);
243          
244 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
244 >      fprintf( stderr, "node %d: sent off the start and end\n", worldRank );
245 >
246 >      sendError = -1;
247 >      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
248                          MPI_COMM_WORLD, istatus);
233      if (sendError) mpiCheckpoint();
249  
250 +      fprintf( stderr, "node %d: value of sendError is %d\n", worldRank, sendError );
251 +
252 +      if (sendError) MPIcheckPoint();
253 +
254        // send current node's configuration line by line.
255  
256        for( i=0; i<nAtoms; i++ ){
# Line 266 | Line 285 | void DumpWriter::writeDump( double currentTime ){
285          else
286            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
287            
288 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
288 >        fprintf( stderr,
289 >                 "node %d: I'm sending the line:\n->%s\n", worldRank, writeLine );
290 >        
291 >        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
292                            MPI_COMM_WORLD);
293        }
294      }
295        
296      sprintf(checkPointMsg,"Node %d sent dump configuration.",
297              procIndex);
298 <    mpiCheckPoint();
298 >    MPIcheckPoint();
299    }
300      
301   #endif // is_mpi
# Line 318 | Line 340 | void DumpWriter::writeFinal(){
340   #ifdef IS_MPI
341    }
342    
343 <  sprintf(checkPointMsg,"Opened file for final configuration\n",procIndex);
344 <  mpiCheckPoint();  
343 >  sprintf(checkPointMsg,"Opened file for final configuration\n");
344 >  MPIcheckPoint();  
345    
346   #endif //is_mpi
347  
# Line 384 | Line 406 | void DumpWriter::writeFinal(){
406    // write out header and node 0's coordinates
407  
408    if( worldRank == 0 ){
409 <    finalOut << entry_plug->mpiSim->getTotAtoms() << "\n";
409 >    finalOut << mpiSim->getTotAtoms() << "\n";
410        
411      finalOut << entry_plug->box_x << "\t"
412               << entry_plug->box_y << "\t"
413               << entry_plug->box_z << "\n";
414      
415      masterIndex = 0;
416 +    
417 +    std::cerr << "about to write node 0 aztoms. nAtoms = " << nAtoms << "\n";
418 +    
419      for( i=0; i<nAtoms; i++ ){
420        
421        sprintf( tempBuffer,
# Line 423 | Line 448 | void DumpWriter::writeFinal(){
448        else
449          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
450          
451 <      outfile << writeLine;
451 >      finalOut << writeLine;
452        masterIndex++;
453      }
454      finalOut.flush();
455    }
456      
457 <  for (procIndex = 1; procIndex < entry_plug->mpiSim->getNumberProcessors();
457 >  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
458         procIndex++){
459  
460      if( worldRank == 0 ){
461          
462        mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
463 <                        MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
463 >                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
464  
465        mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
466 <                        MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
466 >                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
467          
468        // Make sure where node 0 is writing to, matches where the
469        // receiving node expects it to be.
470          
471        if (masterIndex != nodeAtomsStart){
472          sendError = 1;
473 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
473 >        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
474                            MPI_COMM_WORLD);
475          sprintf(painCave.errMsg,
476                  "DumpWriter error: atoms start index (%d) for "
# Line 456 | Line 481 | void DumpWriter::writeFinal(){
481        }
482          
483        sendError = 0;
484 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
484 >      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
485                          MPI_COMM_WORLD);
486  
487        // recieve the nodes writeLines
488  
489 <      for ( i = nodeAtomStart; i <= nodeAtomEnd, i++){
489 >      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
490            
491 <        mpiErr = MPI_Recv(&read_buffer,BUFFERSIZE,MPI_CHAR,procIndex,
492 <                          MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
491 >        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
492 >                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
493  
494          finalOut << writeLine;
495          masterIndex++;
# Line 475 | Line 500 | void DumpWriter::writeFinal(){
500  
501      else if( worldRank == procIndex ){
502  
503 <      nodeAtomStart = entry_plug->mpiSim->getMyAtomStart();
504 <      nodeAtomEnd = entry_plug->mpiSim->getMyAtomEnd();
503 >      nodeAtomsStart = mpiSim->getMyAtomStart();
504 >      nodeAtomsEnd = mpiSim->getMyAtomEnd();
505          
506 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
506 >      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
507                          MPI_COMM_WORLD);
508 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
508 >      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
509                          MPI_COMM_WORLD);
510          
511 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
511 >      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
512                          MPI_COMM_WORLD, istatus);
513 <      if (sendError) mpiCheckpoint();
513 >      if (sendError) MPIcheckPoint();
514  
515        // send current node's configuration line by line.
516  
# Line 521 | Line 546 | void DumpWriter::writeFinal(){
546          else
547            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
548            
549 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
549 >        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
550                            MPI_COMM_WORLD);
551        }
552      }
553        
554      sprintf(checkPointMsg,"Node %d sent dump configuration.",
555              procIndex);
556 <    mpiCheckPoint();
556 >    MPIcheckPoint();
557    }
558  
559    if( worldRank == 0 ) finalOut.close();

Diff Legend

- Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)