
Comparing trunk/mdtools/md_code/DumpWriter.cpp (file contents):
Revision 254 by chuckv, Thu Jan 30 20:03:37 2003 UTC vs.
Revision 261 by chuckv, Mon Feb 3 21:15:59 2003 UTC

# Line 5 | Line 5
5   #ifdef IS_MPI
6   #include <mpi.h>
7   #include "mpiSimulation.hpp"
8 + #define TAKE_THIS_TAG 0
9   #endif //is_mpi
10  
11   #include "ReadWrite.hpp"
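
The new TAKE_THIS_TAG define gives every message in this file a concrete tag; the hunks below substitute it for MPI_ANY_TAG in each send and receive. MPI_ANY_TAG is a receive-side wildcard only, and passing it as the tag argument of MPI_Send is invalid under the MPI standard, so the old sends relied on undefined behavior. A minimal sketch of a matched pair using a fixed tag (the ranks and payload are illustrative, not from this file):

  #include <mpi.h>
  #include <cstdio>

  #define TAKE_THIS_TAG 0   // one fixed tag shared by both ends

  int main( int argc, char** argv ){
    MPI_Init( &argc, &argv );

    int rank;
    MPI_Comm_rank( MPI_COMM_WORLD, &rank );

    int payload = 42;
    MPI_Status istatus;

    if( rank == 1 ){
      // A send must name a real tag; MPI_ANY_TAG here is an error.
      MPI_Send( &payload, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
    }
    else if( rank == 0 ){
      // A receive may use MPI_ANY_TAG, but matching the fixed tag keeps
      // unrelated messages from being accepted by mistake.
      MPI_Recv( &payload, 1, MPI_INT, 1, TAKE_THIS_TAG,
                MPI_COMM_WORLD, &istatus );
      printf( "received %d\n", payload );
    }

    MPI_Finalize();
    return 0;
  }
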
# Line 16 | Line 17 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
17  
18   DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
19  
20 +  entry_plug = the_entry_plug;
21 +
22   #ifdef IS_MPI
23    if(worldRank == 0 ){
24   #endif // is_mpi
25      
26 <    entry_plug = the_entry_plug;
26 >
27      
28      strcpy( outName, entry_plug->sampleName );
29      
27    std::cerr << "Opening " << outName << " for dumping.\n";
28
30      outFile.open(outName, ios::out | ios::trunc );
31      
32      if( !outFile ){
# Line 41 | Line 42 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
42  
43   #ifdef IS_MPI
44    }
45 +
46 +  sprintf( checkPointMsg,
47 +           "Successfully opened output file for dumping.\n");
48 +  MPIcheckPoint();
49   #endif // is_mpi
50   }
51  
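
The constructor now ends with a checkpoint: rank 0 records success in checkPointMsg and every node calls MPIcheckPoint(), so no process proceeds until the output file is confirmed open. MPIcheckPoint() itself is declared elsewhere in this tree; a minimal sketch of the idea, assuming it agrees on rank 0's error flag and shuts all nodes down together on failure:

  #include <mpi.h>
  #include <cstdio>

  // Hypothetical stand-in for MPIcheckPoint(); the real routine in this
  // tree may differ. Rank 0's flag is broadcast so every node either
  // continues or aborts in lockstep.
  void checkPointSketch( int error, const char* msg ){

    MPI_Bcast( &error, 1, MPI_INT, 0, MPI_COMM_WORLD );

    if( error ){
      int rank;
      MPI_Comm_rank( MPI_COMM_WORLD, &rank );
      if( rank == 0 ) fprintf( stderr, "%s", msg );
      MPI_Abort( MPI_COMM_WORLD, error );
    }
  }
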
# Line 60 | Line 65 | void DumpWriter::writeDump( double currentTime ){
65   void DumpWriter::writeDump( double currentTime ){
66    
67    const int BUFFERSIZE = 2000;
68 <  char tempBuffer[500];
68 >  char tempBuffer[BUFFERSIZE];
69    char writeLine[BUFFERSIZE];
70  
71    int i;
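
The buffer change here is a real fix: tempBuffer held 500 bytes while writeLine held BUFFERSIZE (2000), so an atom line formatted by sprintf could overrun the smaller buffer before ever reaching the larger one. Sizing both at BUFFERSIZE closes that hole. A bounds-checked variant would use C99's snprintf instead (a sketch, not what this file does; the parameters stand in for the Atom accessors used below):

  #include <cstdio>

  // Format one atom line with an explicit bound: snprintf writes at most
  // bufSize bytes including the terminating NUL, so an oversized line is
  // truncated rather than overrunning the stack buffer.
  void formatAtomLine( char* buf, size_t bufSize, const char* type,
                       double x, double y, double z,
                       double vx, double vy, double vz ){
    snprintf( buf, bufSize,
              "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
              type, x, y, z, vx, vy, vz );
  }
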
# Line 81 | Line 86 | void DumpWriter::writeDump( double currentTime ){
86      
87    for( i=0; i<nAtoms; i++ ){
88        
89 +
90      sprintf( tempBuffer,
91               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
92               atoms[i]->getType(),
# Line 175 | Line 181 | void DumpWriter::writeDump( double currentTime ){
181      }
182      outFile.flush();
183    }
184 +
185 +  sprintf( checkPointMsg,
186 +           "Successfully wrote node 0's dump configuration.\n");
187 +  MPIcheckPoint();
188      
189    for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
190         procIndex++){
191  
192      if( worldRank == 0 ){
193 <        
193 >      
194        mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
195 <                        MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
196 <
195 >                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
196 >      
197        mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
198 <                        MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
199 <        
198 >                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
199 >      
200        // Make sure where node 0 is writing to, matches where the
201        // receiving node expects it to be.
202 <        
202 >      
203        if (masterIndex != nodeAtomsStart){
204          sendError = 1;
205 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
205 >        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
206                            MPI_COMM_WORLD);
207          sprintf(painCave.errMsg,
208                  "DumpWriter error: atoms start index (%d) for "
# Line 201 | Line 211 | void DumpWriter::writeDump( double currentTime ){
211          painCave.isFatal = 1;
212          simError();
213        }
214 <        
214 >      
215        sendError = 0;
216 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
216 >      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
217                          MPI_COMM_WORLD);
218 <
218 >      
219        // receive the node's writeLines
220 <
220 >      
221        for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
222 <          
222 >        
223          mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
224 <                          MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
225 <
224 >                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
225 >        
226          outFile << writeLine;
227          masterIndex++;
228        }
# Line 222 | Line 232 | void DumpWriter::writeDump( double currentTime ){
232  
233        nodeAtomsStart = mpiSim->getMyAtomStart();
234        nodeAtomsEnd = mpiSim->getMyAtomEnd();
235 <        
236 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
235 >
236 >      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
237                          MPI_COMM_WORLD);
238 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
238 >      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
239                          MPI_COMM_WORLD);
240          
241 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
241 >      sendError = -1;
242 >      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
243                          MPI_COMM_WORLD, istatus);
244 +
245        if (sendError) MPIcheckPoint();
246  
247        // send current node's configuration line by line.
248  
249        for( i=0; i<nAtoms; i++ ){
250 <          
250 >
251          sprintf( tempBuffer,
252                   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
253                   atoms[i]->getType(),
# Line 244 | Line 256 | void DumpWriter::writeDump( double currentTime ){
256                   atoms[i]->getZ(),
257                   atoms[i]->get_vx(),
258                   atoms[i]->get_vy(),
259 <                 atoms[i]->get_vz());
259 >                 atoms[i]->get_vz()); // check here.
260          strcpy( writeLine, tempBuffer );
261            
262          if( atoms[i]->isDirectional() ){
# Line 266 | Line 278 | void DumpWriter::writeDump( double currentTime ){
278          else
279            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
280            
281 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
281 >        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
282                            MPI_COMM_WORLD);
283        }
284      }
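
On the worker side, the revision also initializes sendError to -1 before the blocking receive, so a flag that is somehow never filled in can no longer read as "no error"; the check "if (sendError) MPIcheckPoint()" then routes the sentinel into the error path as well. A hypothetical counterpart to the sketch above:

  #include <mpi.h>

  #define TAKE_THIS_TAG 0

  // Condensed sketch of the worker side: announce the index range, then
  // block until rank 0 renders a verdict.
  int announceAtomRange( int myStart, int myEnd ){
    MPI_Status istatus;
    int verdict = -1;  // sentinel mirrors the sendError = -1 added above

    MPI_Send( &myStart, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );
    MPI_Send( &myEnd,   1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD );

    MPI_Recv( &verdict, 1, MPI_INT, 0, TAKE_THIS_TAG,
              MPI_COMM_WORLD, &istatus );

    return verdict;  // nonzero => fall into the MPIcheckPoint() error path
  }
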
# Line 391 | Line 403 | void DumpWriter::writeFinal(){
403               << entry_plug->box_z << "\n";
404      
405      masterIndex = 0;
406 +    
407      for( i=0; i<nAtoms; i++ ){
408        
409        sprintf( tempBuffer,
# Line 423 | Line 436 | void DumpWriter::writeFinal(){
436        else
437          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
438          
439 <      outFile << writeLine;
439 >      finalOut << writeLine;
440        masterIndex++;
441      }
442      finalOut.flush();
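
The one-line change in this hunk is another genuine fix: writeFinal() was streaming rank 0's own atom lines to outFile, the running dump, instead of finalOut, the final-configuration stream that the rest of the function writes and flushes. Keeping the two streams distinct is the whole point (file names here are illustrative, not taken from this code):

  #include <fstream>

  // Illustrative only; the real names come from entry_plug in this file.
  std::ofstream outFile( "sample.dump" );    // appended by every writeDump()
  std::ofstream finalOut( "sample.final" );  // written once by writeFinal()

  // writeFinal() must route its lines to finalOut, not outFile:
  //   finalOut << writeLine;
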
# Line 435 | Line 448 | void DumpWriter::writeFinal(){
448      if( worldRank == 0 ){
449          
450        mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
451 <                        MPI_ANY_TAG,MPI_COMM_WORLD,istatus);
451 >                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
452  
453        mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
454 <                        MPI_ANY_TAG,MPI_COMM_WORLD, istatus);
454 >                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
455          
456        // Make sure where node 0 is writing to, matches where the
457        // receiving node expects it to be.
458          
459        if (masterIndex != nodeAtomsStart){
460          sendError = 1;
461 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
461 >        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
462                            MPI_COMM_WORLD);
463          sprintf(painCave.errMsg,
464                  "DumpWriter error: atoms start index (%d) for "
# Line 456 | Line 469 | void DumpWriter::writeFinal(){
469        }
470          
471        sendError = 0;
472 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,MPI_ANY_TAG,
472 >      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
473                          MPI_COMM_WORLD);
474  
475        // receive the node's writeLines
# Line 464 | Line 477 | void DumpWriter::writeFinal(){
477        for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
478            
479          mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
480 <                          MPI_ANY_TAG,MPI_COMM_WORLD,istatus );
480 >                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
481  
482          finalOut << writeLine;
483          masterIndex++;
# Line 478 | Line 491 | void DumpWriter::writeFinal(){
491        nodeAtomsStart = mpiSim->getMyAtomStart();
492        nodeAtomsEnd = mpiSim->getMyAtomEnd();
493          
494 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,MPI_ANY_TAG,
494 >      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
495                          MPI_COMM_WORLD);
496 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,MPI_ANY_TAG,
496 >      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
497                          MPI_COMM_WORLD);
498          
499 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,MPI_ANY_TAG,
499 >      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
500                          MPI_COMM_WORLD, istatus);
501        if (sendError) MPIcheckPoint();
502  
# Line 521 | Line 534 | void DumpWriter::writeFinal(){
534          else
535            strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
536            
537 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,MPI_ANY_TAG,
537 >        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
538                            MPI_COMM_WORLD);
539        }
540      }

Diff Legend

      Removed lines (shown with the old revision's line number only)
  +   Added lines
  <   Changed lines, old revision (254)
  >   Changed lines, new revision (261)