ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/group/trunk/OOPSE/libmdtools/DumpWriter.cpp
(Generate patch)

Comparing:
branches/mmeineke/OOPSE/libmdtools/DumpWriter.cpp (file contents), Revision 377 by mmeineke, Fri Mar 21 17:42:12 2003 UTC vs.
trunk/OOPSE/libmdtools/DumpWriter.cpp (file contents), Revision 951 by mmeineke, Fri Jan 16 21:51:51 2004 UTC

# Line 1 | Line 1
1 < #include <cstring>
1 > #define _FILE_OFFSET_BITS 64
2 >
3 > #include <string.h>
4   #include <iostream>
5   #include <fstream>
6 + #include <algorithm>
7 + #include <utility>
8  
9   #ifdef IS_MPI
10   #include <mpi.h>
11   #include "mpiSimulation.hpp"
12 < #define TAKE_THIS_TAG 0
12 >
13 > namespace dWrite{
14 >  void DieDieDie( void );
15 > }
16 >
17 > using namespace dWrite;
18   #endif //is_mpi
19  
20   #include "ReadWrite.hpp"
21   #include "simError.h"
22  
14
15
16
17
23   DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
24  
25    entry_plug = the_entry_plug;
# Line 22 | Line 27 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){
27   #ifdef IS_MPI
28    if(worldRank == 0 ){
29   #endif // is_mpi
30 <    
31 <
32 <    
33 <    strcpy( outName, entry_plug->sampleName );
34 <    
30 <    outFile.open(outName, ios::out | ios::trunc );
31 <    
32 <    if( !outFile ){
33 <      
30 >
31 >    dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );
32 >
33 >    if( !dumpFile ){
34 >
35        sprintf( painCave.errMsg,
36                 "Could not open \"%s\" for dump output.\n",
37 <               outName);
37 >               entry_plug->sampleName);
38        painCave.isFatal = 1;
39        simError();
40      }
40  
41    //outFile.setf( ios::scientific );
41  
42   #ifdef IS_MPI
43    }
44  
45 +  //sort the local atoms by global index
46 +  sortByGlobalIndex();
47 +  
48    sprintf( checkPointMsg,
49             "Sucessfully opened output file for dumping.\n");
50    MPIcheckPoint();
# Line 55 | Line 57 | DumpWriter::~DumpWriter( ){
57    if(worldRank == 0 ){
58   #endif // is_mpi
59  
60 <    outFile.close();
60 >    dumpFile.close();
61  
62   #ifdef IS_MPI
63    }
64   #endif // is_mpi
65   }
66  
67 < void DumpWriter::writeDump( double currentTime ){
67 > #ifdef IS_MPI
68 >
69 > /**
70 > * A hook function to load balancing
71 > */
72 >
73 > void DumpWriter::update(){
74 >  sortByGlobalIndex();          
75 > }
76    
77 <  const int BUFFERSIZE = 2000;
78 <  char tempBuffer[BUFFERSIZE];
79 <  char writeLine[BUFFERSIZE];
77 > /**
78 > * Auxiliary sorting function
79 > */
80 >
81 > bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){
82 >  return p1.second < p2.second;
83 > }
84  
85 <  int i;
86 <  double q[4];
87 <  DirectionalAtom* dAtom;
88 <  int nAtoms = entry_plug->n_atoms;
85 > /**
86 > * Sorting the local index by global index
87 > */
88 >
89 > void DumpWriter::sortByGlobalIndex(){
90    Atom** atoms = entry_plug->atoms;
91 <    
91 >  
92 >  indexArray.clear();
93 >  
94 >  for(int i = 0; i < mpiSim->getMyNlocal();i++)
95 >    indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex()));
96 >  
97 >  sort(indexArray.begin(), indexArray.end(), indexSortingCriterion);    
98  
99 < #ifndef IS_MPI
99 >  //for (int i = 0; i < mpiSim->getMyNlocal(); i++) {
100 >  //  printf("node %d has global %d at local %d\n", worldRank, indexArray[i].second, indexArray[i].first);
101 >  //}
102      
103 <  outFile << nAtoms << "\n";
81 <    
82 <  outFile << currentTime << "\t"
83 <          << entry_plug->box_x << "\t"
84 <          << entry_plug->box_y << "\t"
85 <          << entry_plug->box_z << "\n";
86 <    
87 <  for( i=0; i<nAtoms; i++ ){
88 <      
103 > }
104  
105 <    sprintf( tempBuffer,
91 <             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
92 <             atoms[i]->getType(),
93 <             atoms[i]->getX(),
94 <             atoms[i]->getY(),
95 <             atoms[i]->getZ(),
96 <             atoms[i]->get_vx(),
97 <             atoms[i]->get_vy(),
98 <             atoms[i]->get_vz());
99 <    strcpy( writeLine, tempBuffer );
105 > #endif
106  
107 <    if( atoms[i]->isDirectional() ){
108 <        
109 <      dAtom = (DirectionalAtom *)atoms[i];
110 <      dAtom->getQ( q );
111 <        
112 <      sprintf( tempBuffer,
113 <               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
114 <               q[0],
115 <               q[1],
116 <               q[2],
117 <               q[3],
118 <               dAtom->getJx(),
119 <               dAtom->getJy(),
120 <               dAtom->getJz());
121 <      strcat( writeLine, tempBuffer );
107 > void DumpWriter::writeDump(double currentTime){
108 >
109 >  ofstream finalOut;
110 >  vector<ofstream*> fileStreams;
111 >
112 > #ifdef IS_MPI
113 >  printf("Hello from node %d\n", worldRank);
114 >  sortByGlobalIndex();
115 >  if(worldRank == 0 ){
116 >    
117 >    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
118 >    if( !finalOut ){
119 >      sprintf( painCave.errMsg,
120 >               "Could not open \"%s\" for final dump output.\n",
121 >               entry_plug->finalName );
122 >      painCave.isFatal = 1;
123 >      simError();
124      }
117    else
118      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
119      
120    outFile << writeLine;
125    }
126 <  outFile.flush();
126 > #endif // is_mpi
127  
128 < #else // is_mpi
128 >  fileStreams.push_back(&finalOut);
129 >  fileStreams.push_back(&dumpFile);
130  
131 <  int masterIndex;
127 <  int nodeAtomsStart;
128 <  int nodeAtomsEnd;
129 <  int mpiErr;
130 <  int sendError;
131 <  int procIndex;
132 <    
133 <  MPI_Status istatus[MPI_STATUS_SIZE];
131 >  writeFrame(fileStreams, currentTime);
132  
133 <    
134 <  // write out header and node 0's coordinates
133 > #ifdef IS_MPI
134 >  finalOut.close();
135 > #endif
136 >        
137 > }
138  
139 <  if( worldRank == 0 ){
139 <    outFile << mpiSim->getTotAtoms() << "\n";
140 <      
141 <    outFile << currentTime << "\t"
142 <            << entry_plug->box_x << "\t"
143 <            << entry_plug->box_y << "\t"
144 <            << entry_plug->box_z << "\n";
139 > void DumpWriter::writeFinal(double currentTime){
140  
141 <    masterIndex = 0;
142 <    for( i=0; i<nAtoms; i++ ){
148 <      
149 <      sprintf( tempBuffer,
150 <               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
151 <               atoms[i]->getType(),
152 <               atoms[i]->getX(),
153 <               atoms[i]->getY(),
154 <               atoms[i]->getZ(),
155 <               atoms[i]->get_vx(),
156 <               atoms[i]->get_vy(),
157 <               atoms[i]->get_vz());
158 <      strcpy( writeLine, tempBuffer );
159 <        
160 <      if( atoms[i]->isDirectional() ){
161 <          
162 <        dAtom = (DirectionalAtom *)atoms[i];
163 <        dAtom->getQ( q );
164 <          
165 <        sprintf( tempBuffer,
166 <                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
167 <                 q[0],
168 <                 q[1],
169 <                 q[2],
170 <                 q[3],
171 <                 dAtom->getJx(),
172 <                 dAtom->getJy(),
173 <                 dAtom->getJz());
174 <        strcat( writeLine, tempBuffer );
175 <      }
176 <      else
177 <        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
178 <        
179 <      outFile << writeLine;
180 <      masterIndex++;
181 <    }
182 <    outFile.flush();
183 <  }
141 >  ofstream finalOut;
142 >  vector<ofstream*> fileStreams;
143  
144 <  sprintf( checkPointMsg,
145 <           "Sucessfully wrote node 0's dump configuration.\n");
146 <  MPIcheckPoint();
188 <    
189 <  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
190 <       procIndex++){
144 > #ifdef IS_MPI
145 >  if(worldRank == 0 ){
146 > #endif // is_mpi
147  
148 <    if( worldRank == 0 ){
193 <      
194 <      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
195 <                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
196 <      
197 <      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
198 <                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
199 <      
200 <      // Make sure where node 0 is writing to, matches where the
201 <      // receiving node expects it to be.
202 <      
203 <      if (masterIndex != nodeAtomsStart){
204 <        sendError = 1;
205 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
206 <                          MPI_COMM_WORLD);
207 <        sprintf(painCave.errMsg,
208 <                "DumpWriter error: atoms start index (%d) for "
209 <                "node %d not equal to master index (%d)",
210 <                nodeAtomsStart,procIndex,masterIndex );
211 <        painCave.isFatal = 1;
212 <        simError();
213 <      }
214 <      
215 <      sendError = 0;
216 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
217 <                        MPI_COMM_WORLD);
218 <      
219 <      // recieve the nodes writeLines
220 <      
221 <      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
222 <        
223 <        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
224 <                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
225 <        
226 <        outFile << writeLine;
227 <        masterIndex++;
228 <      }
229 <    }
148 >    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
149  
150 <    else if( worldRank == procIndex ){
151 <
152 <      nodeAtomsStart = mpiSim->getMyAtomStart();
153 <      nodeAtomsEnd = mpiSim->getMyAtomEnd();
154 <
155 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
237 <                        MPI_COMM_WORLD);
238 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
239 <                        MPI_COMM_WORLD);
240 <        
241 <      sendError = -1;
242 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
243 <                        MPI_COMM_WORLD, istatus);
244 <
245 <      if (sendError) MPIcheckPoint();
246 <
247 <      // send current node's configuration line by line.
248 <
249 <      for( i=0; i<nAtoms; i++ ){
250 <
251 <        sprintf( tempBuffer,
252 <                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
253 <                 atoms[i]->getType(),
254 <                 atoms[i]->getX(),
255 <                 atoms[i]->getY(),
256 <                 atoms[i]->getZ(),
257 <                 atoms[i]->get_vx(),
258 <                 atoms[i]->get_vy(),
259 <                 atoms[i]->get_vz()); // check here.
260 <        strcpy( writeLine, tempBuffer );
261 <          
262 <        if( atoms[i]->isDirectional() ){
263 <            
264 <          dAtom = (DirectionalAtom *)atoms[i];
265 <          dAtom->getQ( q );
266 <            
267 <          sprintf( tempBuffer,
268 <                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
269 <                   q[0],
270 <                   q[1],
271 <                   q[2],
272 <                   q[3],
273 <                   dAtom->getJx(),
274 <                   dAtom->getJy(),
275 <                   dAtom->getJz());
276 <          strcat( writeLine, tempBuffer );
277 <        }
278 <        else
279 <          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
280 <          
281 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
282 <                          MPI_COMM_WORLD);
283 <      }
150 >    if( !finalOut ){
151 >      sprintf( painCave.errMsg,
152 >               "Could not open \"%s\" for final dump output.\n",
153 >               entry_plug->finalName );
154 >      painCave.isFatal = 1;
155 >      simError();
156      }
157 <      
158 <    sprintf(checkPointMsg,"Node %d sent dump configuration.",
287 <            procIndex);
288 <    MPIcheckPoint();
157 >
158 > #ifdef IS_MPI
159    }
290    
160   #endif // is_mpi
161 +  
162 +  fileStreams.push_back(&finalOut);  
163 +  writeFrame(fileStreams, currentTime);
164 +
165 + #ifdef IS_MPI
166 +  finalOut.close();
167 + #endif
168 +  
169   }
170  
171 + void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){
172  
173 +  const int BUFFERSIZE = 2000;
174 +  const int MINIBUFFERSIZE = 100;
175  
176 < void DumpWriter::writeFinal(){
176 >  char tempBuffer[BUFFERSIZE];  
177 >  char writeLine[BUFFERSIZE];
178  
179 +  int i, k;
180  
181 <  const int BUFFERSIZE = 2000;
300 <  char tempBuffer[500];
301 <  char writeLine[BUFFERSIZE];
181 > #ifdef IS_MPI
182    
183 <  char finalName[500];
183 >  /*********************************************************************
184 >   * Documentation?  You want DOCUMENTATION?
185 >   *
186 >   * Why all the potatoes below?  
187 >   *
188 >   * To make a long story short, the original version of DumpWriter
189 >   * worked in the most inefficient way possible.  Node 0 would
190 >   * poke each of the node for an individual atom's formatted data
191 >   * as node 0 worked its way down the global index. This was particularly
192 >   * inefficient since the method blocked all processors at every atom
193 >   * (and did it twice!).
194 >   *
195 >   * An intermediate version of DumpWriter could be described from Node
196 >   * zero's perspective as follows:
197 >   *
198 >   *  1) Have 100 of your friends stand in a circle.
199 >   *  2) When you say go, have all of them start tossing potatoes at
200 >   *     you (one at a time).
201 >   *  3) Catch the potatoes.
202 >   *
203 >   * It was an improvement, but MPI has buffers and caches that could
204 >   * best be described in this analogy as "potato nets", so there's no
205 >   * need to block the processors atom-by-atom.
206 >   *
207 >   * This new and improved DumpWriter works in an even more efficient
208 >   * way:
209 >   *
210 >   *  1) Have 100 of your friend stand in a circle.
211 >   *  2) When you say go, have them start tossing 5-pound bags of
212 >   *     potatoes at you.
213 >   *  3) Once you've caught a friend's bag of potatoes,
214 >   *     toss them a spud to let them know they can toss another bag.
215 >   *
216 >   * How's THAT for documentation?
217 >   *
218 >   *********************************************************************/
219  
220 <  int i;
220 >  int *potatoes;
221 >  int myPotato;
222 >
223 >  int nProc;
224 >  int j, which_node, done, which_atom, local_index, currentIndex;
225 >  double atomData6[6];
226 >  double atomData13[13];
227 >  int isDirectional;
228 >  char* atomTypeString;
229 >  char MPIatomTypeString[MINIBUFFERSIZE];
230 >
231 > #else //is_mpi
232 >  int nAtoms = entry_plug->n_atoms;
233 > #endif //is_mpi
234 >
235    double q[4];
236    DirectionalAtom* dAtom;
308  int nAtoms = entry_plug->n_atoms;
237    Atom** atoms = entry_plug->atoms;
238 +  double pos[3], vel[3];
239 +
240 + #ifndef IS_MPI
241    
242 <  ofstream finalOut;
243 <  
244 < #ifdef IS_MPI
245 <  if(worldRank == 0 ){
246 < #endif // is_mpi
247 <    
248 <    strcpy( finalName, entry_plug->finalName );
249 <    
250 <    finalOut.open( finalName, ios::out | ios::trunc );
251 <    if( !finalOut ){
252 <      sprintf( painCave.errMsg,
253 <               "Could not open \"%s\" for final dump output.\n",
254 <               finalName );
255 <      painCave.isFatal = 1;
256 <      simError();
257 <    }
258 <    
259 <    // finalOut.setf( ios::scientific );
329 <    
330 < #ifdef IS_MPI
242 >  for(k = 0; k < outFile.size(); k++){
243 >    *outFile[k] << nAtoms << "\n";
244 >
245 >    *outFile[k] << currentTime << ";\t"
246 >               << entry_plug->Hmat[0][0] << "\t"
247 >                     << entry_plug->Hmat[1][0] << "\t"
248 >                     << entry_plug->Hmat[2][0] << ";\t"
249 >              
250 >               << entry_plug->Hmat[0][1] << "\t"
251 >                     << entry_plug->Hmat[1][1] << "\t"
252 >                     << entry_plug->Hmat[2][1] << ";\t"
253 >
254 >                     << entry_plug->Hmat[0][2] << "\t"
255 >                     << entry_plug->Hmat[1][2] << "\t"
256 >                     << entry_plug->Hmat[2][2] << ";";
257 >
258 >    //write out additional parameters, such as chi and eta
259 >    *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
260    }
261    
262 <  sprintf(checkPointMsg,"Opened file for final configuration\n");
334 <  MPIcheckPoint();  
335 <  
336 < #endif //is_mpi
262 >  for( i=0; i<nAtoms; i++ ){
263  
264 <    
264 >    atoms[i]->getPos(pos);
265 >    atoms[i]->getVel(vel);
266  
340 #ifndef IS_MPI
341    
342  finalOut << nAtoms << "\n";
343    
344  finalOut << entry_plug->box_x << "\t"
345           << entry_plug->box_y << "\t"
346           << entry_plug->box_z << "\n";
347    
348  for( i=0; i<nAtoms; i++ ){
349      
267      sprintf( tempBuffer,
268               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
269               atoms[i]->getType(),
270 <             atoms[i]->getX(),
271 <             atoms[i]->getY(),
272 <             atoms[i]->getZ(),
273 <             atoms[i]->get_vx(),
274 <             atoms[i]->get_vy(),
275 <             atoms[i]->get_vz());
270 >             pos[0],
271 >             pos[1],
272 >             pos[2],
273 >             vel[0],
274 >             vel[1],
275 >             vel[2]);
276      strcpy( writeLine, tempBuffer );
277  
278      if( atoms[i]->isDirectional() ){
279 <        
279 >
280        dAtom = (DirectionalAtom *)atoms[i];
281        dAtom->getQ( q );
282 <        
282 >
283        sprintf( tempBuffer,
284                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
285                 q[0],
# Line 376 | Line 293 | void DumpWriter::writeFinal(){
293      }
294      else
295        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
296 <      
297 <    finalOut << writeLine;
296 >
297 >    for(k = 0; k < outFile.size(); k++)
298 >      *outFile[k] << writeLine;
299    }
382  finalOut.flush();
300  
301   #else // is_mpi
302  
303 <  int masterIndex;
304 <  int nodeAtomsStart;
305 <  int nodeAtomsEnd;
306 <  int mpiErr;
307 <  int sendError;
308 <  int procIndex;
309 <    
310 <  MPI_Status istatus[MPI_STATUS_SIZE];
303 >  /* code to find maximum tag value */
304 >  
305 >  int *tagub, flag, MAXTAG;
306 >  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
307 >  if (flag) {
308 >    MAXTAG = *tagub;
309 >  } else {
310 >    MAXTAG = 32767;
311 >  }  
312  
313 <    
313 >  int haveError;
314 >
315 >  MPI_Status istatus;
316 >  int *AtomToProcMap = mpiSim->getAtomToProcMap();
317 >
318    // write out header and node 0's coordinates
319  
320    if( worldRank == 0 ){
321 <    finalOut << mpiSim->getTotAtoms() << "\n";
322 <      
323 <    finalOut << entry_plug->box_x << "\t"
324 <             << entry_plug->box_y << "\t"
325 <             << entry_plug->box_z << "\n";
321 >
322 >    // Node 0 needs a list of the magic potatoes for each processor;
323 >
324 >    nProc = mpiSim->getNumberProcessors();
325 >    potatoes = new int[nProc];
326 >
327 >    //write out the comment lines
328 >    for (i = 0; i < nProc; i++)
329 >      potatoes[i] = 0;
330      
331 <    masterIndex = 0;
332 <    
333 <    for( i=0; i<nAtoms; i++ ){
331 >      for(k = 0; k < outFile.size(); k++){
332 >        *outFile[k] << mpiSim->getTotAtoms() << "\n";
333 >
334 >        *outFile[k] << currentTime << ";\t"
335 >                         << entry_plug->Hmat[0][0] << "\t"
336 >                         << entry_plug->Hmat[1][0] << "\t"
337 >                         << entry_plug->Hmat[2][0] << ";\t"
338 >
339 >                         << entry_plug->Hmat[0][1] << "\t"
340 >                         << entry_plug->Hmat[1][1] << "\t"
341 >                         << entry_plug->Hmat[2][1] << ";\t"
342 >
343 >                         << entry_plug->Hmat[0][2] << "\t"
344 >                         << entry_plug->Hmat[1][2] << "\t"
345 >                         << entry_plug->Hmat[2][2] << ";";
346 >  
347 >        *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
348 >    }
349 >
350 >    currentIndex = 0;
351 >
352 >    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
353        
354 <      sprintf( tempBuffer,
355 <               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
356 <               atoms[i]->getType(),
357 <               atoms[i]->getX(),
358 <               atoms[i]->getY(),
359 <               atoms[i]->getZ(),
360 <               atoms[i]->get_vx(),
361 <               atoms[i]->get_vy(),
362 <               atoms[i]->get_vz());
363 <      strcpy( writeLine, tempBuffer );
354 >      // Get the Node number which has this atom;
355 >      
356 >      which_node = AtomToProcMap[i];
357 >      
358 >      if (which_node != 0) {
359 >
360 >        if (potatoes[which_node] + 3 >= MAXTAG) {
361 >          // The potato was going to exceed the maximum value,
362 >          // so wrap this processor potato back to 0:        
363 >
364 >          potatoes[which_node] = 0;          
365 >          MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);
366 >          
367 >        }
368 >
369 >        myPotato = potatoes[which_node];        
370 >        
371 >        MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
372 >                 myPotato, MPI_COMM_WORLD, &istatus);
373 >        
374 >        atomTypeString = MPIatomTypeString;
375          
376 <      if( atoms[i]->isDirectional() ){
376 >        myPotato++;
377 >
378 >        MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
379 >                 myPotato, MPI_COMM_WORLD, &istatus);
380 >              
381 >        myPotato++;
382 >
383 >        if (isDirectional) {          
384 >          MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
385 >                   myPotato, MPI_COMM_WORLD, &istatus);
386 >        } else {
387 >          MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
388 >                   myPotato, MPI_COMM_WORLD, &istatus);          
389 >        }
390 >        
391 >        myPotato++;
392 >        potatoes[which_node] = myPotato;
393 >
394 >      } else {
395 >        
396 >        haveError = 0;
397 >        which_atom = i;
398 >        
399 >        //local_index = -1;
400 >
401 >        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
402 >        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
403 >        //}
404 >        
405 >        //if (local_index != -1) {
406            
407 <        dAtom = (DirectionalAtom *)atoms[i];
408 <        dAtom->getQ( q );
407 >          local_index = indexArray[currentIndex].first;        
408 >          
409 >          if (which_atom == indexArray[currentIndex].second) {
410 >            
411 >            atomTypeString = atoms[local_index]->getType();
412 >            
413 >          atoms[local_index]->getPos(pos);
414 >          atoms[local_index]->getVel(vel);          
415            
416 <        sprintf( tempBuffer,
417 <                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
418 <                 q[0],
419 <                 q[1],
420 <                 q[2],
421 <                 q[3],
422 <                 dAtom->getJx(),
423 <                 dAtom->getJy(),
424 <                 dAtom->getJz());
425 <        strcat( writeLine, tempBuffer );
416 >          atomData6[0] = pos[0];
417 >          atomData6[1] = pos[1];
418 >          atomData6[2] = pos[2];
419 >
420 >          atomData6[3] = vel[0];
421 >          atomData6[4] = vel[1];
422 >          atomData6[5] = vel[2];
423 >          
424 >          isDirectional = 0;
425 >
426 >          if( atoms[local_index]->isDirectional() ){
427 >
428 >            isDirectional = 1;
429 >            
430 >            dAtom = (DirectionalAtom *)atoms[local_index];
431 >            dAtom->getQ( q );
432 >
433 >            for (int j = 0; j < 6 ; j++)
434 >              atomData13[j] = atomData6[j];            
435 >            
436 >            atomData13[6] = q[0];
437 >            atomData13[7] = q[1];
438 >            atomData13[8] = q[2];
439 >            atomData13[9] = q[3];
440 >            
441 >            atomData13[10] = dAtom->getJx();
442 >            atomData13[11] = dAtom->getJy();
443 >            atomData13[12] = dAtom->getJz();
444 >          }
445 >          
446 >        } else {
447 >          sprintf(painCave.errMsg,
448 >                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
449 >                  which_atom, worldRank, currentIndex, local_index );
450 >          haveError= 1;
451 >          simError();
452 >        }
453 >        
454 >        if(haveError) DieDieDie();
455 >        
456 >        currentIndex++;
457        }
458 <      else
458 >      // If we've survived to here, format the line:
459 >      
460 >      if (!isDirectional) {
461 >        
462 >        sprintf( writeLine,
463 >                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
464 >                 atomTypeString,
465 >                 atomData6[0],
466 >                 atomData6[1],
467 >                 atomData6[2],
468 >                 atomData6[3],
469 >                 atomData6[4],
470 >                 atomData6[5]);
471 >        
472          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
473          
474 <      finalOut << writeLine;
475 <      masterIndex++;
474 >      } else {
475 >        
476 >        sprintf( writeLine,
477 >                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
478 >                 atomTypeString,
479 >                 atomData13[0],
480 >                 atomData13[1],
481 >                 atomData13[2],
482 >                 atomData13[3],
483 >                 atomData13[4],
484 >                 atomData13[5],
485 >                 atomData13[6],
486 >                 atomData13[7],
487 >                 atomData13[8],
488 >                 atomData13[9],
489 >                 atomData13[10],
490 >                 atomData13[11],
491 >                 atomData13[12]);
492 >        
493 >      }
494 >      
495 >      for(k = 0; k < outFile.size(); k++)
496 >        *outFile[k] << writeLine;
497      }
442    finalOut.flush();
443  }
498      
499 <  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
500 <       procIndex++){
499 >    for(k = 0; k < outFile.size(); k++)
500 >      outFile[k]->flush();
501 >    
502 >    sprintf( checkPointMsg,
503 >             "Sucessfully took a dump.\n");
504 >    
505 >    MPIcheckPoint();        
506 >    
507 >    delete[] potatoes;
508 >    
509 >  } else {
510  
511 <    if( worldRank == 0 ){
449 <        
450 <      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
451 <                        TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
511 >    // worldRank != 0, so I'm a remote node.  
512  
513 <      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
454 <                        TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
455 <        
456 <      // Make sure where node 0 is writing to, matches where the
457 <      // receiving node expects it to be.
458 <        
459 <      if (masterIndex != nodeAtomsStart){
460 <        sendError = 1;
461 <        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
462 <                          MPI_COMM_WORLD);
463 <        sprintf(painCave.errMsg,
464 <                "DumpWriter error: atoms start index (%d) for "
465 <                "node %d not equal to master index (%d)",
466 <                nodeAtomsStart,procIndex,masterIndex );
467 <        painCave.isFatal = 1;
468 <        simError();
469 <      }
470 <        
471 <      sendError = 0;
472 <      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
473 <                        MPI_COMM_WORLD);
513 >    // Set my magic potato to 0:
514  
515 <      // recieve the nodes writeLines
515 >    myPotato = 0;
516 >    currentIndex = 0;
517 >    
518 >    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
519 >      
520 >      // Am I the node which has this atom?
521 >      
522 >      if (AtomToProcMap[i] == worldRank) {
523  
524 <      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
524 >        if (myPotato + 3 >= MAXTAG) {
525            
526 <        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
527 <                          TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
526 >          // The potato was going to exceed the maximum value,
527 >          // so wrap this processor potato back to 0 (and block until
528 >          // node 0 says we can go:
529 >          
530 >          MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);
531 >          
532 >        }
533 >        which_atom = i;
534  
535 <        finalOut << writeLine;
483 <        masterIndex++;
484 <      }
535 >        //local_index = -1;
536  
537 <      finalOut.flush();
538 <    }
537 >        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
538 >        // if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
539 >        //}
540 >        
541 >        //if (local_index != -1) {
542  
543 <    else if( worldRank == procIndex ){
543 >        local_index = indexArray[currentIndex].first;        
544 >                
545 >        if (which_atom == indexArray[currentIndex].second) {
546 >        
547 >          atomTypeString = atoms[local_index]->getType();
548 >          
549 >          atoms[local_index]->getPos(pos);
550 >          atoms[local_index]->getVel(vel);
551 >          
552 >          atomData6[0] = pos[0];
553 >          atomData6[1] = pos[1];
554 >          atomData6[2] = pos[2];
555  
556 <      nodeAtomsStart = mpiSim->getMyAtomStart();
557 <      nodeAtomsEnd = mpiSim->getMyAtomEnd();
558 <        
559 <      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
560 <                        MPI_COMM_WORLD);
496 <      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
497 <                        MPI_COMM_WORLD);
498 <        
499 <      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
500 <                        MPI_COMM_WORLD, istatus);
501 <      if (sendError) MPIcheckPoint();
556 >          atomData6[3] = vel[0];
557 >          atomData6[4] = vel[1];
558 >          atomData6[5] = vel[2];
559 >          
560 >          isDirectional = 0;
561  
562 <      // send current node's configuration line by line.
562 >          if( atoms[local_index]->isDirectional() ){
563  
564 <      for( i=0; i<nAtoms; i++ ){
565 <          
566 <        sprintf( tempBuffer,
567 <                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
568 <                 atoms[i]->getType(),
569 <                 atoms[i]->getX(),
570 <                 atoms[i]->getY(),
571 <                 atoms[i]->getZ(),
572 <                 atoms[i]->get_vx(),
573 <                 atoms[i]->get_vy(),
574 <                 atoms[i]->get_vz());
575 <        strcpy( writeLine, tempBuffer );
576 <          
577 <        if( atoms[i]->isDirectional() ){
578 <            
579 <          dAtom = (DirectionalAtom *)atoms[i];
580 <          dAtom->getQ( q );
581 <            
582 <          sprintf( tempBuffer,
583 <                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
584 <                   q[0],
585 <                   q[1],
586 <                   q[2],
587 <                   q[3],
529 <                   dAtom->getJx(),
530 <                   dAtom->getJy(),
531 <                   dAtom->getJz());
532 <          strcat( writeLine, tempBuffer );
564 >            isDirectional = 1;
565 >            
566 >            dAtom = (DirectionalAtom *)atoms[local_index];
567 >            dAtom->getQ( q );
568 >            
569 >            for (int j = 0; j < 6 ; j++)
570 >              atomData13[j] = atomData6[j];
571 >            
572 >            atomData13[6] = q[0];
573 >            atomData13[7] = q[1];
574 >            atomData13[8] = q[2];
575 >            atomData13[9] = q[3];
576 >  
577 >            atomData13[10] = dAtom->getJx();
578 >            atomData13[11] = dAtom->getJy();
579 >            atomData13[12] = dAtom->getJz();
580 >          }
581 >
582 >        } else {
583 >          sprintf(painCave.errMsg,
584 >                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
585 >                  which_atom, worldRank, currentIndex, local_index );
586 >          haveError= 1;
587 >          simError();
588          }
589 <        else
590 <          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
591 <          
592 <        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
593 <                          MPI_COMM_WORLD);
589 >        
590 >        strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);
591 >
592 >        // null terminate the string before sending (just in case):
593 >        MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';
594 >
595 >        MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
596 >                             myPotato, MPI_COMM_WORLD);
597 >        
598 >        myPotato++;
599 >
600 >        MPI_Send(&isDirectional, 1, MPI_INT, 0,
601 >                             myPotato, MPI_COMM_WORLD);
602 >        
603 >        myPotato++;
604 >        
605 >        if (isDirectional) {
606 >
607 >          MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
608 >                   myPotato, MPI_COMM_WORLD);
609 >          
610 >        } else {
611 >
612 >          MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
613 >                   myPotato, MPI_COMM_WORLD);
614 >        }
615 >
616 >        myPotato++;  
617 >        currentIndex++;    
618        }
619      }
541      
542    sprintf(checkPointMsg,"Node %d sent dump configuration.",
543            procIndex);
544    MPIcheckPoint();
545  }
620  
621 <  if( worldRank == 0 ) finalOut.close();
622 <
621 >    sprintf( checkPointMsg,
622 >             "Sucessfully took a dump.\n");
623 >    MPIcheckPoint();        
624      
625 +  }
626 +  
627   #endif // is_mpi
628   }
629 +
630 + #ifdef IS_MPI
631 +
632 + // a couple of functions to let us escape the write loop
633 +
634 + void dWrite::DieDieDie( void ){
635 +
636 +  MPI_Finalize();
637 +  exit (0);
638 + }
639 +
640 + #endif //is_mpi

Diff Legend

- Removed lines
+ Added lines
< Changed lines (as in the older revision, 377)
> Changed lines (as in the newer revision, 951)