root/group/trunk/OOPSE/libmdtools/DumpWriter.cpp

Comparing:
branches/mmeineke/OOPSE/libmdtools/DumpWriter.cpp (file contents), Revision 377 by mmeineke, Fri Mar 21 17:42:12 2003 UTC vs.
trunk/OOPSE/libmdtools/DumpWriter.cpp (file contents), Revision 949 by chuckv, Thu Jan 15 21:57:10 2004 UTC

# Line 1 | Line 1
1 < #include <cstring>
1 > #define _FILE_OFFSET_BITS 64
2 >
3 > #include <string.h>
4   #include <iostream>
5   #include <fstream>
6 + #include <algorithm>
7 + #include <utility>
8  
9   #ifdef IS_MPI
10   #include <mpi.h>
11   #include "mpiSimulation.hpp"
12 < #define TAKE_THIS_TAG 0
12 >
13 > namespace dWrite{
14 >  void DieDieDie( void );
15 > }
16 >
17 > using namespace dWrite;
18   #endif //is_mpi
19  
20   #include "ReadWrite.hpp"
21   #include "simError.h"
22  
14
15
16
17
23   DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
24  
25    entry_plug = the_entry_plug;
# Line 22 | Line 27 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
27   #ifdef IS_MPI
28    if(worldRank == 0 ){
29   #endif // is_mpi
30 <    
31 <
32 <    
33 <    strcpy( outName, entry_plug->sampleName );
34 <    
30 <    outFile.open(outName, ios::out | ios::trunc );
31 <    
32 <    if( !outFile ){
33 <      
30 >
31 >    dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );
32 >
33 >    if( !dumpFile ){
34 >
35        sprintf( painCave.errMsg,
36                 "Could not open \"%s\" for dump output.\n",
37 <               outName);
37 >               entry_plug->sampleName);
38        painCave.isFatal = 1;
39        simError();
40      }
40  
41    //outFile.setf( ios::scientific );
41  
42   #ifdef IS_MPI
43    }
44  
45 +  //sort the local atoms by global index
46 +  sortByGlobalIndex();
47 +  
48    sprintf( checkPointMsg,
49           "Successfully opened output file for dumping.\n");
50    MPIcheckPoint();
# Line 55 | Line 57 | DumpWriter::~DumpWriter( ){
57    if(worldRank == 0 ){
58   #endif // is_mpi
59  
60 <    outFile.close();
60 >    dumpFile.close();
61  
62   #ifdef IS_MPI
63    }
64   #endif // is_mpi
65   }
66  
67 < void DumpWriter::writeDump( double currentTime ){
67 > #ifdef IS_MPI
68 >
69 > /**
70 > * A hook function to load balancing
71 > */
72 >
73 > void DumpWriter::update(){
74 >  sortByGlobalIndex();          
75 > }
76    
77 <  const int BUFFERSIZE = 2000;
78 <  char tempBuffer[BUFFERSIZE];
79 <  char writeLine[BUFFERSIZE];
77 > /**
78 > * Auxiliary sorting function
79 > */
80 >
81 > bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){
82 >  return p1.second < p2.second;
83 > }
84  
85 <  int i;
86 <  double q[4];
87 <  DirectionalAtom* dAtom;
88 <  int nAtoms = entry_plug->n_atoms;
85 > /**
86 > * Sorting the local index by global index
87 > */
88 >
89 > void DumpWriter::sortByGlobalIndex(){
90    Atom** atoms = entry_plug->atoms;
91 <    
91 >  
92 >  indexArray.clear();
93 >  
94 >  for(int i = 0; i < mpiSim->getMyNlocal();i++)
95 >    indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex()));
96 >  
97 >  sort(indexArray.begin(), indexArray.end(), indexSortingCriterion);    
98  
99 < #ifndef IS_MPI
99 >  //for (int i = 0; i < mpiSim->getMyNlocal(); i++) {
100 >  //  printf("node %d has global %d at local %d\n", worldRank, indexArray[i].second, indexArray[i].first);
101 >  //}
102      
103 <  outFile << nAtoms << "\n";
81 <    
82 <  outFile << currentTime << "\t"
83 <          << entry_plug->box_x << "\t"
84 <          << entry_plug->box_y << "\t"
85 <          << entry_plug->box_z << "\n";
86 <    
87 <  for( i=0; i<nAtoms; i++ ){
88 <      
103 > }
104  
105 <    sprintf( tempBuffer,
91 <             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
92 <             atoms[i]->getType(),
93 <             atoms[i]->getX(),
94 <             atoms[i]->getY(),
95 <             atoms[i]->getZ(),
96 <             atoms[i]->get_vx(),
97 <             atoms[i]->get_vy(),
98 <             atoms[i]->get_vz());
99 <    strcpy( writeLine, tempBuffer );
105 > #endif
106  
107 <    if( atoms[i]->isDirectional() ){
108 <        
109 <      dAtom = (DirectionalAtom *)atoms[i];
110 <      dAtom->getQ( q );
111 <        
112 <      sprintf( tempBuffer,
113 <               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
114 <               q[0],
115 <               q[1],
116 <               q[2],
117 <               q[3],
118 <               dAtom->getJx(),
119 <               dAtom->getJy(),
120 <               dAtom->getJz());
121 <      strcat( writeLine, tempBuffer );
107 > void DumpWriter::writeDump(double currentTime){
108 >
109 >  ofstream finalOut;
110 >  vector<ofstream*> fileStreams;
111 >
112 > #ifdef IS_MPI
113 >  printf("Hello from node %d\n", worldRank);
114 >  sortByGlobalIndex();
115 >  if(worldRank == 0 ){
116 >    
117 >    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
118 >    if( !finalOut ){
119 >      sprintf( painCave.errMsg,
120 >               "Could not open \"%s\" for final dump output.\n",
121 >               entry_plug->finalName );
122 >      painCave.isFatal = 1;
123 >      simError();
124      }
117    else
118      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
119      
120    outFile << writeLine;
125    }
126 <  outFile.flush();
126 > #endif // is_mpi
127  
128 < #else // is_mpi
128 >  fileStreams.push_back(&finalOut);
129 >  fileStreams.push_back(&dumpFile);
130  
131 <  int masterIndex;
127 <  int nodeAtomsStart;
128 <  int nodeAtomsEnd;
129 <  int mpiErr;
130 <  int sendError;
131 <  int procIndex;
132 <    
133 <  MPI_Status istatus[MPI_STATUS_SIZE];
131 >  writeFrame(fileStreams, currentTime);
132  
133 <    
134 <  // write out header and node 0's coordinates
133 > #ifdef IS_MPI
134 >  finalOut.close();
135 > #endif
136 >        
137 > }
138  
139 <  if( worldRank == 0 ){
139 <    outFile << mpiSim->getTotAtoms() << "\n";
140 <      
141 <    outFile << currentTime << "\t"
142 <            << entry_plug->box_x << "\t"
143 <            << entry_plug->box_y << "\t"
144 <            << entry_plug->box_z << "\n";
139 > void DumpWriter::writeFinal(double currentTime){
140  
141 <    masterIndex = 0;
142 <    for( i=0; i<nAtoms; i++ ){
148 <      
149 <      sprintf( tempBuffer,
150 <               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
151 <               atoms[i]->getType(),
152 <               atoms[i]->getX(),
153 <               atoms[i]->getY(),
154 <               atoms[i]->getZ(),
155 <               atoms[i]->get_vx(),
156 <               atoms[i]->get_vy(),
157 <               atoms[i]->get_vz());
158 <      strcpy( writeLine, tempBuffer );
159 <        
160 <      if( atoms[i]->isDirectional() ){
161 <          
162 <        dAtom = (DirectionalAtom *)atoms[i];
163 <        dAtom->getQ( q );
164 <          
165 <        sprintf( tempBuffer,
166 <                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
167 <                 q[0],
168 <                 q[1],
169 <                 q[2],
170 <                 q[3],
171 <                 dAtom->getJx(),
172 <                 dAtom->getJy(),
173 <                 dAtom->getJz());
174 <        strcat( writeLine, tempBuffer );
175 <      }
176 <      else
177 <        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
178 <        
179 <      outFile << writeLine;
180 <      masterIndex++;
181 <    }
182 <    outFile.flush();
183 <  }
141 >  ofstream finalOut;
142 >  vector<ofstream*> fileStreams;
143  
144 <  sprintf( checkPointMsg,
145 <           "Sucessfully wrote node 0's dump configuration.\n");
187 <  MPIcheckPoint();
188 <    
189 <  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
190 <       procIndex++){
144 > #ifdef IS_MPI
145 >  if(worldRank == 0 ){
146  
147 <    if( worldRank == 0 ){
193 <      
194 <      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
195 <                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
196 <      
197 <      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
198 <                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
199 <      
200 <      // Make sure where node 0 is writing to, matches where the
201 <      // receiving node expects it to be.
202 <      
203 <      if (masterIndex != nodeAtomsStart){
204 <        sendError = 1;
205 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
206 <                          MPI_COMM_WORLD);
207 <        sprintf(painCave.errMsg,
208 <                "DumpWriter error: atoms start index (%d) for "
209 <                "node %d not equal to master index (%d)",
210 <                nodeAtomsStart,procIndex,masterIndex );
211 <        painCave.isFatal = 1;
212 <        simError();
213 <      }
214 <      
215 <      sendError = 0;
216 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
217 <                        MPI_COMM_WORLD);
218 <      
219 <      // recieve the nodes writeLines
220 <      
221 <      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
222 <        
223 <        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
224 <                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
225 <        
226 <        outFile << writeLine;
227 <        masterIndex++;
228 <      }
229 <    }
147 >    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
148  
149 <    else if( worldRank == procIndex ){
150 <
151 <      nodeAtomsStart = mpiSim->getMyAtomStart();
152 <      nodeAtomsEnd = mpiSim->getMyAtomEnd();
153 <
154 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
237 <                        MPI_COMM_WORLD);
238 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
239 <                        MPI_COMM_WORLD);
240 <        
241 <      sendError = -1;
242 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
243 <                        MPI_COMM_WORLD, istatus);
244 <
245 <      if (sendError) MPIcheckPoint();
246 <
247 <      // send current node's configuration line by line.
248 <
249 <      for( i=0; i<nAtoms; i++ ){
250 <
251 <        sprintf( tempBuffer,
252 <                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
253 <                 atoms[i]->getType(),
254 <                 atoms[i]->getX(),
255 <                 atoms[i]->getY(),
256 <                 atoms[i]->getZ(),
257 <                 atoms[i]->get_vx(),
258 <                 atoms[i]->get_vy(),
259 <                 atoms[i]->get_vz()); // check here.
260 <        strcpy( writeLine, tempBuffer );
261 <          
262 <        if( atoms[i]->isDirectional() ){
263 <            
264 <          dAtom = (DirectionalAtom *)atoms[i];
265 <          dAtom->getQ( q );
266 <            
267 <          sprintf( tempBuffer,
268 <                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
269 <                   q[0],
270 <                   q[1],
271 <                   q[2],
272 <                   q[3],
273 <                   dAtom->getJx(),
274 <                   dAtom->getJy(),
275 <                   dAtom->getJz());
276 <          strcat( writeLine, tempBuffer );
277 <        }
278 <        else
279 <          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
280 <          
281 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
282 <                          MPI_COMM_WORLD);
283 <      }
149 >    if( !finalOut ){
150 >      sprintf( painCave.errMsg,
151 >               "Could not open \"%s\" for final dump output.\n",
152 >               entry_plug->finalName );
153 >      painCave.isFatal = 1;
154 >      simError();
155      }
156 <      
286 <    sprintf(checkPointMsg,"Node %d sent dump configuration.",
287 <            procIndex);
288 <    MPIcheckPoint();
156 >
157    }
290    
158   #endif // is_mpi
159 +  
160 +  fileStreams.push_back(&finalOut);  
161 +  writeFrame(fileStreams, currentTime);
162 +
163 + #ifdef IS_MPI
164 +  finalOut.close();
165 + #endif
166 +  
167   }
168  
169 + void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){
170  
171 +  const int BUFFERSIZE = 2000;
172 +  const int MINIBUFFERSIZE = 100;
173  
174 < void DumpWriter::writeFinal(){
174 >  char tempBuffer[BUFFERSIZE];  
175 >  char writeLine[BUFFERSIZE];
176  
177 +  int i, k;
178  
179 <  const int BUFFERSIZE = 2000;
300 <  char tempBuffer[500];
301 <  char writeLine[BUFFERSIZE];
179 > #ifdef IS_MPI
180    
181 <  char finalName[500];
181 >  /*********************************************************************
182 >   * Documentation?  You want DOCUMENTATION?
183 >   *
184 >   * Why all the potatoes below?  
185 >   *
186 >   * To make a long story short, the original version of DumpWriter
187 >   * worked in the most inefficient way possible.  Node 0 would
188 >   * poke each of the nodes for an individual atom's formatted data
189 >   * as node 0 worked its way down the global index. This was particularly
190 >   * inefficient since the method blocked all processors at every atom
191 >   * (and did it twice!).
192 >   *
193 >   * An intermediate version of DumpWriter could be described from Node
194 >   * zero's perspective as follows:
195 >   *
196 >   *  1) Have 100 of your friends stand in a circle.
197 >   *  2) When you say go, have all of them start tossing potatoes at
198 >   *     you (one at a time).
199 >   *  3) Catch the potatoes.
200 >   *
201 >   * It was an improvement, but MPI has buffers and caches that could
202 >   * best be described in this analogy as "potato nets", so there's no
203 >   * need to block the processors atom-by-atom.
204 >   *
205 >   * This new and improved DumpWriter works in an even more efficient
206 >   * way:
207 >   *
208 >   *  1) Have 100 of your friends stand in a circle.
209 >   *  2) When you say go, have them start tossing 5-pound bags of
210 >   *     potatoes at you.
211 >   *  3) Once you've caught a friend's bag of potatoes,
212 >   *     toss them a spud to let them know they can toss another bag.
213 >   *
214 >   * How's THAT for documentation?
215 >   *
216 >   *********************************************************************/
217  
218 <  int i;
218 >  int *potatoes;
219 >  int myPotato;
220 >
221 >  int nProc;
222 >  int j, which_node, done, which_atom, local_index, currentIndex;
223 >  double atomData6[6];
224 >  double atomData13[13];
225 >  int isDirectional;
226 >  char* atomTypeString;
227 >  char MPIatomTypeString[MINIBUFFERSIZE];
228 >
229 > #else //is_mpi
230 >  int nAtoms = entry_plug->n_atoms;
231 > #endif //is_mpi
232 >
233    double q[4];
234    DirectionalAtom* dAtom;
308  int nAtoms = entry_plug->n_atoms;
235    Atom** atoms = entry_plug->atoms;
236 +  double pos[3], vel[3];
237 +
238 + #ifndef IS_MPI
239    
240 <  ofstream finalOut;
241 <  
242 < #ifdef IS_MPI
243 <  if(worldRank == 0 ){
244 < #endif // is_mpi
245 <    
246 <    strcpy( finalName, entry_plug->finalName );
247 <    
248 <    finalOut.open( finalName, ios::out | ios::trunc );
249 <    if( !finalOut ){
250 <      sprintf( painCave.errMsg,
251 <               "Could not open \"%s\" for final dump output.\n",
252 <               finalName );
253 <      painCave.isFatal = 1;
254 <      simError();
255 <    }
256 <    
257 <    // finalOut.setf( ios::scientific );
329 <    
330 < #ifdef IS_MPI
240 >  for(k = 0; k < outFile.size(); k++){
241 >    *outFile[k] << nAtoms << "\n";
242 >
243 >    *outFile[k] << currentTime << ";\t"
244 >               << entry_plug->Hmat[0][0] << "\t"
245 >                     << entry_plug->Hmat[1][0] << "\t"
246 >                     << entry_plug->Hmat[2][0] << ";\t"
247 >              
248 >               << entry_plug->Hmat[0][1] << "\t"
249 >                     << entry_plug->Hmat[1][1] << "\t"
250 >                     << entry_plug->Hmat[2][1] << ";\t"
251 >
252 >                     << entry_plug->Hmat[0][2] << "\t"
253 >                     << entry_plug->Hmat[1][2] << "\t"
254 >                     << entry_plug->Hmat[2][2] << ";";
255 >
256 >    //write out additional parameters, such as chi and eta
257 >    *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
258    }
259    
260 <  sprintf(checkPointMsg,"Opened file for final configuration\n");
334 <  MPIcheckPoint();  
335 <  
336 < #endif //is_mpi
260 >  for( i=0; i<nAtoms; i++ ){
261  
262 <    
262 >    atoms[i]->getPos(pos);
263 >    atoms[i]->getVel(vel);
264  
340 #ifndef IS_MPI
341    
342  finalOut << nAtoms << "\n";
343    
344  finalOut << entry_plug->box_x << "\t"
345           << entry_plug->box_y << "\t"
346           << entry_plug->box_z << "\n";
347    
348  for( i=0; i<nAtoms; i++ ){
349      
265      sprintf( tempBuffer,
266               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
267               atoms[i]->getType(),
268 <             atoms[i]->getX(),
269 <             atoms[i]->getY(),
270 <             atoms[i]->getZ(),
271 <             atoms[i]->get_vx(),
272 <             atoms[i]->get_vy(),
273 <             atoms[i]->get_vz());
268 >             pos[0],
269 >             pos[1],
270 >             pos[2],
271 >             vel[0],
272 >             vel[1],
273 >             vel[2]);
274      strcpy( writeLine, tempBuffer );
275  
276      if( atoms[i]->isDirectional() ){
277 <        
277 >
278        dAtom = (DirectionalAtom *)atoms[i];
279        dAtom->getQ( q );
280 <        
280 >
281        sprintf( tempBuffer,
282                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
283                 q[0],
# Line 376 | Line 291 | void DumpWriter::writeFinal(){
291      }
292      else
293        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
294 <      
295 <    finalOut << writeLine;
294 >
295 >    for(k = 0; k < outFile.size(); k++)
296 >      *outFile[k] << writeLine;
297    }
382  finalOut.flush();
298  
299   #else // is_mpi
300  
301 <  int masterIndex;
302 <  int nodeAtomsStart;
303 <  int nodeAtomsEnd;
304 <  int mpiErr;
305 <  int sendError;
306 <  int procIndex;
307 <    
308 <  MPI_Status istatus[MPI_STATUS_SIZE];
301 >  /* code to find maximum tag value */
302 >  
303 >  int *tagub, flag, MAXTAG;
304 >  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
305 >  if (flag) {
306 >    MAXTAG = *tagub;
307 >  } else {
308 >    MAXTAG = 32767;
309 >  }  
310  
311 <    
311 >  int haveError;
312 >
313 >  MPI_Status istatus;
314 >  int *AtomToProcMap = mpiSim->getAtomToProcMap();
315 >
316    // write out header and node 0's coordinates
317  
318    if( worldRank == 0 ){
319 <    finalOut << mpiSim->getTotAtoms() << "\n";
320 <      
321 <    finalOut << entry_plug->box_x << "\t"
322 <             << entry_plug->box_y << "\t"
323 <             << entry_plug->box_z << "\n";
319 >
320 >    // Node 0 needs a list of the magic potatoes for each processor;
321 >
322 >    nProc = mpiSim->getNumberProcessors();
323 >    potatoes = new int[nProc];
324 >
325 >    //write out the comment lines
326 >    for (i = 0; i < nProc; i++)
327 >      potatoes[i] = 0;
328      
329 <    masterIndex = 0;
330 <    
331 <    for( i=0; i<nAtoms; i++ ){
329 >      for(k = 0; k < outFile.size(); k++){
330 >        *outFile[k] << mpiSim->getTotAtoms() << "\n";
331 >
332 >        *outFile[k] << currentTime << ";\t"
333 >                         << entry_plug->Hmat[0][0] << "\t"
334 >                         << entry_plug->Hmat[1][0] << "\t"
335 >                         << entry_plug->Hmat[2][0] << ";\t"
336 >
337 >                         << entry_plug->Hmat[0][1] << "\t"
338 >                         << entry_plug->Hmat[1][1] << "\t"
339 >                         << entry_plug->Hmat[2][1] << ";\t"
340 >
341 >                         << entry_plug->Hmat[0][2] << "\t"
342 >                         << entry_plug->Hmat[1][2] << "\t"
343 >                         << entry_plug->Hmat[2][2] << ";";
344 >  
345 >        *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
346 >    }
347 >
348 >    currentIndex = 0;
349 >
350 >    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
351        
352 <      sprintf( tempBuffer,
353 <               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
354 <               atoms[i]->getType(),
355 <               atoms[i]->getX(),
356 <               atoms[i]->getY(),
357 <               atoms[i]->getZ(),
358 <               atoms[i]->get_vx(),
359 <               atoms[i]->get_vy(),
360 <               atoms[i]->get_vz());
361 <      strcpy( writeLine, tempBuffer );
352 >      // Get the Node number which has this atom;
353 >      
354 >      which_node = AtomToProcMap[i];
355 >      
356 >      if (which_node != 0) {
357 >
358 >        if (potatoes[which_node] + 3 >= MAXTAG) {
359 >          // The potato was going to exceed the maximum value,
360 >          // so wrap this processor potato back to 0:        
361 >
362 >          potatoes[which_node] = 0;          
363 >          MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);
364 >          
365 >        }
366 >
367 >        myPotato = potatoes[which_node];        
368 >        
369 >        MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
370 >                 myPotato, MPI_COMM_WORLD, &istatus);
371 >        
372 >        atomTypeString = MPIatomTypeString;
373          
374 <      if( atoms[i]->isDirectional() ){
374 >        myPotato++;
375 >
376 >        MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
377 >                 myPotato, MPI_COMM_WORLD, &istatus);
378 >              
379 >        myPotato++;
380 >
381 >        if (isDirectional) {          
382 >          MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
383 >                   myPotato, MPI_COMM_WORLD, &istatus);
384 >        } else {
385 >          MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
386 >                   myPotato, MPI_COMM_WORLD, &istatus);          
387 >        }
388 >        
389 >        myPotato++;
390 >        potatoes[which_node] = myPotato;
391 >
392 >      } else {
393 >        
394 >        haveError = 0;
395 >        which_atom = i;
396 >        
397 >        //local_index = -1;
398 >
399 >        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
400 >        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
401 >        //}
402 >        
403 >        //if (local_index != -1) {
404            
405 <        dAtom = (DirectionalAtom *)atoms[i];
406 <        dAtom->getQ( q );
405 >          local_index = indexArray[currentIndex].first;        
406 >          
407 >          if (which_atom == indexArray[currentIndex].second) {
408 >            
409 >            atomTypeString = atoms[local_index]->getType();
410 >            
411 >          atoms[local_index]->getPos(pos);
412 >          atoms[local_index]->getVel(vel);          
413            
414 <        sprintf( tempBuffer,
415 <                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
416 <                 q[0],
417 <                 q[1],
418 <                 q[2],
419 <                 q[3],
420 <                 dAtom->getJx(),
421 <                 dAtom->getJy(),
422 <                 dAtom->getJz());
423 <        strcat( writeLine, tempBuffer );
414 >          atomData6[0] = pos[0];
415 >          atomData6[1] = pos[1];
416 >          atomData6[2] = pos[2];
417 >
418 >          atomData6[3] = vel[0];
419 >          atomData6[4] = vel[1];
420 >          atomData6[5] = vel[2];
421 >          
422 >          isDirectional = 0;
423 >
424 >          if( atoms[local_index]->isDirectional() ){
425 >
426 >            isDirectional = 1;
427 >            
428 >            dAtom = (DirectionalAtom *)atoms[local_index];
429 >            dAtom->getQ( q );
430 >
431 >            for (int j = 0; j < 6 ; j++)
432 >              atomData13[j] = atomData6[j];            
433 >            
434 >            atomData13[6] = q[0];
435 >            atomData13[7] = q[1];
436 >            atomData13[8] = q[2];
437 >            atomData13[9] = q[3];
438 >            
439 >            atomData13[10] = dAtom->getJx();
440 >            atomData13[11] = dAtom->getJy();
441 >            atomData13[12] = dAtom->getJz();
442 >          }
443 >          
444 >        } else {
445 >          sprintf(painCave.errMsg,
446 >                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
447 >                  which_atom, worldRank, currentIndex, local_index );
448 >          haveError= 1;
449 >          simError();
450 >        }
451 >        
452 >        if(haveError) DieDieDie();
453 >        
454 >        currentIndex++;
455        }
456 <      else
456 >      // If we've survived to here, format the line:
457 >      
458 >      if (!isDirectional) {
459 >        
460 >        sprintf( writeLine,
461 >                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
462 >                 atomTypeString,
463 >                 atomData6[0],
464 >                 atomData6[1],
465 >                 atomData6[2],
466 >                 atomData6[3],
467 >                 atomData6[4],
468 >                 atomData6[5]);
469 >        
470          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
471          
472 <      finalOut << writeLine;
473 <      masterIndex++;
472 >      } else {
473 >        
474 >        sprintf( writeLine,
475 >                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
476 >                 atomTypeString,
477 >                 atomData13[0],
478 >                 atomData13[1],
479 >                 atomData13[2],
480 >                 atomData13[3],
481 >                 atomData13[4],
482 >                 atomData13[5],
483 >                 atomData13[6],
484 >                 atomData13[7],
485 >                 atomData13[8],
486 >                 atomData13[9],
487 >                 atomData13[10],
488 >                 atomData13[11],
489 >                 atomData13[12]);
490 >        
491 >      }
492 >      
493 >      for(k = 0; k < outFile.size(); k++)
494 >        *outFile[k] << writeLine;
495      }
442    finalOut.flush();
443  }
496      
497 <  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
498 <       procIndex++){
497 >    for(k = 0; k < outFile.size(); k++)
498 >      outFile[k]->flush();
499 >    
500 >    sprintf( checkPointMsg,
501 >             "Successfully took a dump.\n");
502 >    
503 >    MPIcheckPoint();        
504 >    
505 >    delete[] potatoes;
506 >    
507 >  } else {
508  
509 <    if( worldRank == 0 ){
449 <        
450 <      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
451 <                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
509 >    // worldRank != 0, so I'm a remote node.  
510  
511 <      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
454 <                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
455 <        
456 <      // Make sure where node 0 is writing to, matches where the
457 <      // receiving node expects it to be.
458 <        
459 <      if (masterIndex != nodeAtomsStart){
460 <        sendError = 1;
461 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
462 <                          MPI_COMM_WORLD);
463 <        sprintf(painCave.errMsg,
464 <                "DumpWriter error: atoms start index (%d) for "
465 <                "node %d not equal to master index (%d)",
466 <                nodeAtomsStart,procIndex,masterIndex );
467 <        painCave.isFatal = 1;
468 <        simError();
469 <      }
470 <        
471 <      sendError = 0;
472 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
473 <                        MPI_COMM_WORLD);
511 >    // Set my magic potato to 0:
512  
513 <      // recieve the nodes writeLines
513 >    myPotato = 0;
514 >    currentIndex = 0;
515 >    
516 >    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
517 >      
518 >      // Am I the node which has this atom?
519 >      
520 >      if (AtomToProcMap[i] == worldRank) {
521  
522 <      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
522 >        if (myPotato + 3 >= MAXTAG) {
523            
524 <        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
525 <                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
524 >          // The potato was going to exceed the maximum value,
525 >          // so wrap this processor potato back to 0 (and block until
526 >          // node 0 says we can go:
527 >          
528 >          MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);
529 >          
530 >        }
531 >        which_atom = i;
532  
533 <        finalOut << writeLine;
483 <        masterIndex++;
484 <      }
533 >        //local_index = -1;
534  
535 <      finalOut.flush();
536 <    }
535 >        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
536 >        // if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
537 >        //}
538 >        
539 >        //if (local_index != -1) {
540  
541 <    else if( worldRank == procIndex ){
541 >        local_index = indexArray[currentIndex].first;        
542 >                
543 >        if (which_atom == indexArray[currentIndex].second) {
544 >        
545 >          atomTypeString = atoms[local_index]->getType();
546 >          
547 >          atoms[local_index]->getPos(pos);
548 >          atoms[local_index]->getVel(vel);
549 >          
550 >          atomData6[0] = pos[0];
551 >          atomData6[1] = pos[1];
552 >          atomData6[2] = pos[2];
553  
554 <      nodeAtomsStart = mpiSim->getMyAtomStart();
555 <      nodeAtomsEnd = mpiSim->getMyAtomEnd();
556 <        
557 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
558 <                        MPI_COMM_WORLD);
496 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
497 <                        MPI_COMM_WORLD);
498 <        
499 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
500 <                        MPI_COMM_WORLD, istatus);
501 <      if (sendError) MPIcheckPoint();
554 >          atomData6[3] = vel[0];
555 >          atomData6[4] = vel[1];
556 >          atomData6[5] = vel[2];
557 >          
558 >          isDirectional = 0;
559  
560 <      // send current node's configuration line by line.
560 >          if( atoms[local_index]->isDirectional() ){
561  
562 <      for( i=0; i<nAtoms; i++ ){
563 <          
564 <        sprintf( tempBuffer,
565 <                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
566 <                 atoms[i]->getType(),
567 <                 atoms[i]->getX(),
568 <                 atoms[i]->getY(),
569 <                 atoms[i]->getZ(),
570 <                 atoms[i]->get_vx(),
571 <                 atoms[i]->get_vy(),
572 <                 atoms[i]->get_vz());
573 <        strcpy( writeLine, tempBuffer );
574 <          
575 <        if( atoms[i]->isDirectional() ){
576 <            
577 <          dAtom = (DirectionalAtom *)atoms[i];
578 <          dAtom->getQ( q );
579 <            
580 <          sprintf( tempBuffer,
581 <                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
582 <                   q[0],
583 <                   q[1],
584 <                   q[2],
585 <                   q[3],
529 <                   dAtom->getJx(),
530 <                   dAtom->getJy(),
531 <                   dAtom->getJz());
532 <          strcat( writeLine, tempBuffer );
562 >            isDirectional = 1;
563 >            
564 >            dAtom = (DirectionalAtom *)atoms[local_index];
565 >            dAtom->getQ( q );
566 >            
567 >            for (int j = 0; j < 6 ; j++)
568 >              atomData13[j] = atomData6[j];
569 >            
570 >            atomData13[6] = q[0];
571 >            atomData13[7] = q[1];
572 >            atomData13[8] = q[2];
573 >            atomData13[9] = q[3];
574 >  
575 >            atomData13[10] = dAtom->getJx();
576 >            atomData13[11] = dAtom->getJy();
577 >            atomData13[12] = dAtom->getJz();
578 >          }
579 >
580 >        } else {
581 >          sprintf(painCave.errMsg,
582 >                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
583 >                  which_atom, worldRank, currentIndex, local_index );
584 >          haveError= 1;
585 >          simError();
586          }
587 <        else
588 <          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
589 <          
590 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
591 <                          MPI_COMM_WORLD);
587 >        
588 >        strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);
589 >
590 >        // null terminate the string before sending (just in case):
591 >        MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';
592 >
593 >        MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
594 >                             myPotato, MPI_COMM_WORLD);
595 >        
596 >        myPotato++;
597 >
598 >        MPI_Send(&isDirectional, 1, MPI_INT, 0,
599 >                             myPotato, MPI_COMM_WORLD);
600 >        
601 >        myPotato++;
602 >        
603 >        if (isDirectional) {
604 >
605 >          MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
606 >                   myPotato, MPI_COMM_WORLD);
607 >          
608 >        } else {
609 >
610 >          MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
611 >                   myPotato, MPI_COMM_WORLD);
612 >        }
613 >
614 >        myPotato++;  
615 >        currentIndex++;    
616        }
617      }
541      
542    sprintf(checkPointMsg,"Node %d sent dump configuration.",
543            procIndex);
544    MPIcheckPoint();
545  }
618  
619 <  if( worldRank == 0 ) finalOut.close();
620 <
619 >    sprintf( checkPointMsg,
620 >             "Successfully took a dump.\n");
621 >    MPIcheckPoint();        
622      
623 +  }
624 +  
625   #endif // is_mpi
626   }
627 +
628 + #ifdef IS_MPI
629 +
630 + // a couple of functions to let us escape the write loop
631 +
632 + void dWrite::DieDieDie( void ){
633 +
634 +  MPI_Finalize();
635 +  exit (0);
636 + }
637 +
638 + #endif //is_mpi

Diff Legend

Removed lines (shown with only the old-revision line number)
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)
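
Note on the "potato" scheme (writeFrame, revision 949): the long comment block in the diff describes a tag-counter handshake in which node 0 pulls each remote atom's data using per-sender message tags that increase monotonically and wrap before reaching MPI_TAG_UB. The program below is a minimal, standalone sketch of that pattern only, not the OOPSE code: it assumes a round-robin atom-to-processor map, sends a single double per "atom", and hard-codes a conservative MAXTAG of 32767 instead of querying MPI_TAG_UB the way the real writeFrame does.

#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int rank, nProc;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProc);

  const int nGlobal = 20;      // stand-in for the global atom count
  const int MAXTAG  = 32767;   // conservative lower bound on MPI_TAG_UB

  if (rank == 0) {
    std::vector<int> potato(nProc, 0);       // one tag counter per sender
    for (int i = 0; i < nGlobal; i++) {
      int owner = i % nProc;                 // illustrative ownership rule
      if (owner == 0) continue;              // node 0 handles its own atoms directly
      if (potato[owner] + 1 >= MAXTAG) {     // tag about to overflow:
        potato[owner] = 0;                   // wrap it and tell the sender to wrap too
        MPI_Send(&potato[owner], 1, MPI_INT, owner, 0, MPI_COMM_WORLD);
      }
      double datum;
      MPI_Recv(&datum, 1, MPI_DOUBLE, owner, potato[owner],
               MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      potato[owner]++;                       // next message from this node uses the next tag
      printf("atom %d from node %d: %f\n", i, owner, datum);
    }
  } else {
    int myPotato = 0;                        // this node's tag counter
    for (int i = 0; i < nGlobal; i++) {
      if (i % nProc != rank) continue;       // not my atom
      if (myPotato + 1 >= MAXTAG) {          // block until node 0 resets the counter
        MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
      }
      double datum = 100.0 * rank + i;       // stand-in for real atom data
      MPI_Send(&datum, 1, MPI_DOUBLE, 0, myPotato, MPI_COMM_WORLD);
      myPotato++;
    }
  }

  MPI_Finalize();
  return 0;
}

The counter exists only so that each in-flight message carries a unique tag per sender; node 0 can then drain messages that MPI has already buffered without a per-atom barrier, while the wrap-around keeps every tag below MPI_TAG_UB on long runs.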