root/group/trunk/OOPSE/libmdtools/DumpWriter.cpp
Revision: 1097
Committed: Mon Apr 12 20:32:20 2004 UTC by gezelter
File size: 14861 byte(s)
Log Message:
Changes for RigidBody dynamics (Somewhat extensive)

File Contents

#define _LARGEFILE_SOURCE64
#define _FILE_OFFSET_BITS 64

#include <string.h>
#include <iostream>
#include <fstream>
#include <algorithm>
#include <utility>

#ifdef IS_MPI
#include <mpi.h>
#include "mpiSimulation.hpp"

namespace dWrite{
  void DieDieDie( void );
}

using namespace dWrite;
#endif //is_mpi

#include "ReadWrite.hpp"
#include "simError.h"

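// The DumpWriter constructor opens the dump (sample) file for output.  Under
// MPI, only the master node (worldRank == 0) touches the file; each node then
// sorts its local atoms by global index so frames can be written in a
// well-defined order.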
DumpWriter::DumpWriter( SimInfo* the_entry_plug ){

  entry_plug = the_entry_plug;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );

    if( !dumpFile ){

      sprintf( painCave.errMsg,
               "Could not open \"%s\" for dump output.\n",
               entry_plug->sampleName);
      painCave.isFatal = 1;
      simError();
    }

#ifdef IS_MPI
  }

  // sort the local atoms by global index
  sortByGlobalIndex();

  sprintf( checkPointMsg,
           "Successfully opened output file for dumping.\n");
  MPIcheckPoint();
#endif // is_mpi
}

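// The destructor closes the dump file (again, only the master node holds an
// open stream under MPI).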
DumpWriter::~DumpWriter( ){

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    dumpFile.close();

#ifdef IS_MPI
  }
#endif // is_mpi
}

#ifdef IS_MPI

/**
 * A hook function for load balancing
 */

void DumpWriter::update(){
  sortByGlobalIndex();
}

/**
 * Auxiliary sorting function
 */

bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){
  return p1.second < p2.second;
}

/**
 * Sorts the local indices by global index
 */

void DumpWriter::sortByGlobalIndex(){
  Atom** atoms = entry_plug->atoms;

  indexArray.clear();

  for(int i = 0; i < mpiSim->getMyNlocal(); i++)
    indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex()));

  sort(indexArray.begin(), indexArray.end(), indexSortingCriterion);
}

#endif

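// writeDump: appends one frame at currentTime to the running dump file and
// also rewrites the "final" (end-of-run) file with the same frame.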
void DumpWriter::writeDump(double currentTime){

  ofstream finalOut;
  vector<ofstream*> fileStreams;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif
    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
    if( !finalOut ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for final dump output.\n",
               entry_plug->finalName );
      painCave.isFatal = 1;
      simError();
    }
#ifdef IS_MPI
  }
#endif // is_mpi

  fileStreams.push_back(&finalOut);
  fileStreams.push_back(&dumpFile);

  writeFrame(fileStreams, currentTime);

#ifdef IS_MPI
  finalOut.close();
#endif

}

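// writeFinal: writes a single frame at currentTime to the "final"
// (end-of-run) file only, leaving the running dump file untouched.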
void DumpWriter::writeFinal(double currentTime){

  ofstream finalOut;
  vector<ofstream*> fileStreams;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );

    if( !finalOut ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for final dump output.\n",
               entry_plug->finalName );
      painCave.isFatal = 1;
      simError();
    }

#ifdef IS_MPI
  }
#endif // is_mpi

  fileStreams.push_back(&finalOut);
  writeFrame(fileStreams, currentTime);

#ifdef IS_MPI
  finalOut.close();
#endif

}

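// writeFrame: formats one frame and writes it to every stream in outFile.
// The single-processor build writes the atoms directly; under MPI, node 0
// assembles the frame, pulling the data for remote atoms from their owning
// processors in global-index order.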
void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){

  const int BUFFERSIZE = 2000;
  const int MINIBUFFERSIZE = 100;

  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  int i, k;

#ifdef IS_MPI

  /*********************************************************************
   * Documentation? You want DOCUMENTATION?
   *
   * Why all the potatoes below?
   *
   * To make a long story short, the original version of DumpWriter
   * worked in the most inefficient way possible. Node 0 would
   * poke each of the nodes for an individual atom's formatted data
   * as node 0 worked its way down the global index. This was particularly
   * inefficient since the method blocked all processors at every atom
   * (and did it twice!).
   *
   * An intermediate version of DumpWriter could be described from Node
   * zero's perspective as follows:
   *
   *  1) Have 100 of your friends stand in a circle.
   *  2) When you say go, have all of them start tossing potatoes at
   *     you (one at a time).
   *  3) Catch the potatoes.
   *
   * It was an improvement, but MPI has buffers and caches that could
   * best be described in this analogy as "potato nets", so there's no
   * need to block the processors atom-by-atom.
   *
   * This new and improved DumpWriter works in an even more efficient
   * way:
   *
   *  1) Have 100 of your friends stand in a circle.
   *  2) When you say go, have them start tossing 5-pound bags of
   *     potatoes at you.
   *  3) Once you've caught a friend's bag of potatoes,
   *     toss them a spud to let them know they can toss another bag.
   *
   * How's THAT for documentation?
   *
   *********************************************************************/
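  // In concrete terms: each remote atom reaches node 0 as three consecutive
  // point-to-point messages (type string, directional flag, and either 6 or
  // 13 doubles), all tagged with that processor's running "potato" counter.
  // The counter is wrapped back to zero before it can exceed MPI_TAG_UB,
  // with a handshake so that sender and receiver stay in step.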

  int *potatoes;
  int myPotato;

  int nProc;
  int j, which_node, done, which_atom, local_index, currentIndex;
  double atomData6[6];
  double atomData13[13];
  int isDirectional;
  char* atomTypeString;
  char MPIatomTypeString[MINIBUFFERSIZE];

#else //is_mpi
  int nAtoms = entry_plug->n_atoms;
#endif //is_mpi

  double q[4], ji[3];
  DirectionalAtom* dAtom;
  Atom** atoms = entry_plug->atoms;
  double pos[3], vel[3];

#ifndef IS_MPI

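  // Single-processor path: write the frame header (atom count, then a line
  // with the time, the H matrix column by column, and any extra integrator
  // parameters), followed by one line per atom: type, position, velocity,
  // and either quaternion / angular momentum or a row of zeroes.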
  for(k = 0; k < outFile.size(); k++){
    *outFile[k] << nAtoms << "\n";

    *outFile[k] << currentTime << ";\t"
                << entry_plug->Hmat[0][0] << "\t"
                << entry_plug->Hmat[1][0] << "\t"
                << entry_plug->Hmat[2][0] << ";\t"

                << entry_plug->Hmat[0][1] << "\t"
                << entry_plug->Hmat[1][1] << "\t"
                << entry_plug->Hmat[2][1] << ";\t"

                << entry_plug->Hmat[0][2] << "\t"
                << entry_plug->Hmat[1][2] << "\t"
                << entry_plug->Hmat[2][2] << ";";

    // write out additional parameters, such as chi and eta
    *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
  }

  for( i=0; i<nAtoms; i++ ){

    atoms[i]->getPos(pos);
    atoms[i]->getVel(vel);

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             pos[0],
             pos[1],
             pos[2],
             vel[0],
             vel[1],
             vel[2]);
    strcpy( writeLine, tempBuffer );

    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );
      dAtom->getJ( ji );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               ji[0],
               ji[1],
               ji[2]);
      strcat( writeLine, tempBuffer );
    }
    else
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    for(k = 0; k < outFile.size(); k++)
      *outFile[k] << writeLine;
  }

#else // is_mpi

  /* code to find maximum tag value */

  int *tagub, flag, MAXTAG;
  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
  if (flag) {
    MAXTAG = *tagub;
  } else {
    MAXTAG = 32767;
  }
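  // Note: when the MPI_TAG_UB attribute cannot be queried, MAXTAG falls back
  // to 32767, the smallest tag upper bound the MPI standard guarantees.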

  int haveError;

  MPI_Status istatus;
  int *AtomToProcMap = mpiSim->getAtomToProcMap();

  // write out header and node 0's coordinates

  if( worldRank == 0 ){

    // Node 0 needs a list of the magic potatoes for each processor;

    nProc = mpiSim->getNumberProcessors();
    potatoes = new int[nProc];

    // all of the potato counters start at zero
    for (i = 0; i < nProc; i++)
      potatoes[i] = 0;

    // write out the comment lines
    for(k = 0; k < outFile.size(); k++){
      *outFile[k] << mpiSim->getTotAtoms() << "\n";

      *outFile[k] << currentTime << ";\t"
                  << entry_plug->Hmat[0][0] << "\t"
                  << entry_plug->Hmat[1][0] << "\t"
                  << entry_plug->Hmat[2][0] << ";\t"

                  << entry_plug->Hmat[0][1] << "\t"
                  << entry_plug->Hmat[1][1] << "\t"
                  << entry_plug->Hmat[2][1] << ";\t"

                  << entry_plug->Hmat[0][2] << "\t"
                  << entry_plug->Hmat[1][2] << "\t"
                  << entry_plug->Hmat[2][2] << ";";

      *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
    }

    currentIndex = 0;

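    // Walk the atoms in global-index order: data for atoms owned by other
    // nodes arrives via the potato-tagged messages described above, while
    // locally owned atoms are looked up through the pre-sorted indexArray.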
    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {

      // Get the Node number which has this atom;

      which_node = AtomToProcMap[i];

      if (which_node != 0) {

        if (potatoes[which_node] + 3 >= MAXTAG) {
          // The potato was going to exceed the maximum value,
          // so wrap this processor potato back to 0:

          potatoes[which_node] = 0;
          // send the reset counter value so the remote node can resynchronize
          MPI_Send(&potatoes[which_node], 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);

        }

        myPotato = potatoes[which_node];

        MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
                 myPotato, MPI_COMM_WORLD, &istatus);

        atomTypeString = MPIatomTypeString;

        myPotato++;

        MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
                 myPotato, MPI_COMM_WORLD, &istatus);

        myPotato++;

        if (isDirectional) {
          MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
                   myPotato, MPI_COMM_WORLD, &istatus);
        } else {
          MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
                   myPotato, MPI_COMM_WORLD, &istatus);
        }

        myPotato++;
        potatoes[which_node] = myPotato;

      } else {

        haveError = 0;
        which_atom = i;

        local_index = indexArray[currentIndex].first;

        if (which_atom == indexArray[currentIndex].second) {

          atomTypeString = atoms[local_index]->getType();

          atoms[local_index]->getPos(pos);
          atoms[local_index]->getVel(vel);

          atomData6[0] = pos[0];
          atomData6[1] = pos[1];
          atomData6[2] = pos[2];

          atomData6[3] = vel[0];
          atomData6[4] = vel[1];
          atomData6[5] = vel[2];

          isDirectional = 0;

          if( atoms[local_index]->isDirectional() ){

            isDirectional = 1;

            dAtom = (DirectionalAtom *)atoms[local_index];
            dAtom->getQ( q );
            dAtom->getJ( ji );

            for (int j = 0; j < 6 ; j++)
              atomData13[j] = atomData6[j];

            atomData13[6] = q[0];
            atomData13[7] = q[1];
            atomData13[8] = q[2];
            atomData13[9] = q[3];

            atomData13[10] = ji[0];
            atomData13[11] = ji[1];
            atomData13[12] = ji[2];
          }

        } else {
          sprintf(painCave.errMsg,
                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
                  which_atom, worldRank, currentIndex, local_index );
          haveError = 1;
          simError();
        }

        if(haveError) DieDieDie();

        currentIndex++;
      }
      // If we've survived to here, format the line:

      if (!isDirectional) {

        sprintf( writeLine,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atomTypeString,
                 atomData6[0],
                 atomData6[1],
                 atomData6[2],
                 atomData6[3],
                 atomData6[4],
                 atomData6[5]);

        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      } else {

        sprintf( writeLine,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 atomTypeString,
                 atomData13[0],
                 atomData13[1],
                 atomData13[2],
                 atomData13[3],
                 atomData13[4],
                 atomData13[5],
                 atomData13[6],
                 atomData13[7],
                 atomData13[8],
                 atomData13[9],
                 atomData13[10],
                 atomData13[11],
                 atomData13[12]);

      }

      for(k = 0; k < outFile.size(); k++)
        *outFile[k] << writeLine;
    }

    for(k = 0; k < outFile.size(); k++)
      outFile[k]->flush();

    sprintf( checkPointMsg,
             "Successfully took a dump.\n");

    MPIcheckPoint();

    delete[] potatoes;

  } else {

    // worldRank != 0, so I'm a remote node.

    // Set my magic potato to 0:

    myPotato = 0;
    currentIndex = 0;

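    // Remote nodes walk the same global-index loop and, for each atom they
    // own, send the three potato-tagged messages that node 0 expects:
    // the type string, the directional flag, and the data array.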
    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {

      // Am I the node which has this atom?

      if (AtomToProcMap[i] == worldRank) {

        if (myPotato + 3 >= MAXTAG) {

          // The potato was going to exceed the maximum value,
          // so wrap this processor potato back to 0 (and block until
          // node 0 says we can go):

          MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);

        }
        which_atom = i;

        local_index = indexArray[currentIndex].first;

        if (which_atom == indexArray[currentIndex].second) {

          atomTypeString = atoms[local_index]->getType();

          atoms[local_index]->getPos(pos);
          atoms[local_index]->getVel(vel);

          atomData6[0] = pos[0];
          atomData6[1] = pos[1];
          atomData6[2] = pos[2];

          atomData6[3] = vel[0];
          atomData6[4] = vel[1];
          atomData6[5] = vel[2];

          isDirectional = 0;

          if( atoms[local_index]->isDirectional() ){

            isDirectional = 1;

            dAtom = (DirectionalAtom *)atoms[local_index];
            dAtom->getQ( q );
            dAtom->getJ( ji );

            for (int j = 0; j < 6 ; j++)
              atomData13[j] = atomData6[j];

            atomData13[6] = q[0];
            atomData13[7] = q[1];
            atomData13[8] = q[2];
            atomData13[9] = q[3];

            atomData13[10] = ji[0];
            atomData13[11] = ji[1];
            atomData13[12] = ji[2];
          }

        } else {
          sprintf(painCave.errMsg,
                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
                  which_atom, worldRank, currentIndex, local_index );
          haveError = 1;
          simError();
        }

        strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);

        // null terminate the string before sending (just in case):
        MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';

        MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
                 myPotato, MPI_COMM_WORLD);

        myPotato++;

        MPI_Send(&isDirectional, 1, MPI_INT, 0,
                 myPotato, MPI_COMM_WORLD);

        myPotato++;

        if (isDirectional) {

          MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
                   myPotato, MPI_COMM_WORLD);

        } else {

          MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
                   myPotato, MPI_COMM_WORLD);
        }

        myPotato++;
        currentIndex++;
      }
    }

    sprintf( checkPointMsg,
             "Successfully took a dump.\n");
    MPIcheckPoint();

  }

#endif // is_mpi
}

#ifdef IS_MPI

// a function to let us escape the write loop

void dWrite::DieDieDie( void ){

  MPI_Finalize();
  exit (0);
}

#endif //is_mpi