1 |
#define _FILE_OFFSET_BITS 64 |
2 |
|
3 |
#include <string.h> |
4 |
#include <iostream> |
5 |
#include <fstream> |
6 |
#include <algorithm> |
7 |
#include <utility> |
8 |
|
9 |
#ifdef IS_MPI |
10 |
#include <mpi.h> |
11 |
#include "mpiSimulation.hpp" |
12 |
|
13 |
namespace dWrite{ |
14 |
void DieDieDie( void ); |
15 |
} |
16 |
|
17 |
using namespace dWrite; |
18 |
#endif //is_mpi |
19 |
|
20 |
#include "ReadWrite.hpp" |
21 |
#include "simError.h" |
22 |
|
23 |
/**
 * Constructor: stores the simulation handle and opens the dump file
 * named by entry_plug->sampleName, truncating any existing contents.
 *
 * In MPI builds only node 0 touches the filesystem; every node then
 * sorts its local atoms by global index and synchronizes at a
 * checkpoint so a failed open on node 0 stops the whole run.
 */
DumpWriter::DumpWriter( SimInfo* the_entry_plug ){

  // Keep a handle to the simulation data this writer will dump.
  entry_plug = the_entry_plug;

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc );

    if( !dumpFile ){

      // Fatal: without the dump file there is nowhere to write frames.
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for dump output.\n",
               entry_plug->sampleName);
      painCave.isFatal = 1;
      simError();
    }

#ifdef IS_MPI
  }

  //sort the local atoms by global index
  sortByGlobalIndex();

  // Collective checkpoint: all nodes block here until node 0 confirms
  // the file opened successfully.
  sprintf( checkPointMsg,
           "Sucessfully opened output file for dumping.\n");
  MPIcheckPoint();
#endif // is_mpi
}
53 |
|
54 |
/**
 * Destructor: closes the dump file.  In MPI builds only node 0 ever
 * opened it, so only node 0 closes it; remote nodes have nothing to
 * release.
 */
DumpWriter::~DumpWriter( ){

#ifdef IS_MPI
  if(worldRank == 0 ){
#endif // is_mpi

    dumpFile.close();

#ifdef IS_MPI
  }
#endif // is_mpi
}
66 |
|
67 |
#ifdef IS_MPI |
68 |
|
69 |
/**
 * A hook function to load balancing
 *
 * Called after atoms have been reassigned between processors; rebuilds
 * this node's (local index, global index) ordering so the next frame
 * is gathered in global-index order.
 */

void DumpWriter::update(){
  sortByGlobalIndex();
}
76 |
|
77 |
/**
 * Auxiliary sorting function
 *
 * Strict-weak-ordering predicate for sort(): ranks two
 * (local index, global index) pairs by ascending global index,
 * i.e. by the .second member only.
 */

bool indexSortingCriterion(const std::pair<int, int>& p1, const std::pair<int, int>& p2){
  const int globalLeft  = p1.second;
  const int globalRight = p2.second;
  return globalLeft < globalRight;
}
84 |
|
85 |
/** |
86 |
* Sorting the local index by global index |
87 |
*/ |
88 |
|
89 |
void DumpWriter::sortByGlobalIndex(){ |
90 |
Atom** atoms = entry_plug->atoms; |
91 |
|
92 |
indexArray.clear(); |
93 |
|
94 |
for(int i = 0; i < mpiSim->getMyNlocal();i++) |
95 |
indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex())); |
96 |
|
97 |
sort(indexArray.begin(), indexArray.end(), indexSortingCriterion); |
98 |
|
99 |
//for (int i = 0; i < mpiSim->getMyNlocal(); i++) { |
100 |
// printf("node %d has global %d at local %d\n", worldRank, indexArray[i].second, indexArray[i].first); |
101 |
//} |
102 |
|
103 |
} |
104 |
|
105 |
#endif |
106 |
|
107 |
/**
 * Writes one frame at currentTime to the running dump file and (in
 * parallel runs) refreshes the "final" coordinate file as well.
 *
 * @param currentTime simulation time stamped into the frame header
 *
 * Fix: removed a leftover debug trace
 * (printf("Hello from node %d\n", worldRank)) that printed to stdout
 * on every node at every dump in MPI builds.
 */
void DumpWriter::writeDump(double currentTime){

  ofstream finalOut;
  vector<ofstream*> fileStreams;

#ifdef IS_MPI
  // Atoms may have migrated between processors since the last frame,
  // so rebuild the global-index ordering before gathering.
  sortByGlobalIndex();
  if(worldRank == 0 ){

    finalOut.open( entry_plug->finalName, ios::out | ios::trunc );
    if( !finalOut ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for final dump output.\n",
               entry_plug->finalName );
      painCave.isFatal = 1;
      simError();
    }
  }
#endif // is_mpi

  // NOTE(review): in serial builds finalOut is never opened here, so
  // writes to it just set the stream's fail state and are discarded —
  // only dumpFile actually receives the frame.  Confirm this is the
  // intended behavior.
  fileStreams.push_back(&finalOut);
  fileStreams.push_back(&dumpFile);

  writeFrame(fileStreams, currentTime);

#ifdef IS_MPI
  finalOut.close();
#endif

}
138 |
|
139 |
void DumpWriter::writeFinal(double currentTime){ |
140 |
|
141 |
ofstream finalOut; |
142 |
vector<ofstream*> fileStreams; |
143 |
|
144 |
#ifdef IS_MPI |
145 |
if(worldRank == 0 ){ |
146 |
#endif // is_mpi |
147 |
|
148 |
finalOut.open( entry_plug->finalName, ios::out | ios::trunc ); |
149 |
|
150 |
if( !finalOut ){ |
151 |
sprintf( painCave.errMsg, |
152 |
"Could not open \"%s\" for final dump output.\n", |
153 |
entry_plug->finalName ); |
154 |
painCave.isFatal = 1; |
155 |
simError(); |
156 |
} |
157 |
|
158 |
#ifdef IS_MPI |
159 |
} |
160 |
#endif // is_mpi |
161 |
|
162 |
fileStreams.push_back(&finalOut); |
163 |
writeFrame(fileStreams, currentTime); |
164 |
|
165 |
#ifdef IS_MPI |
166 |
finalOut.close(); |
167 |
#endif |
168 |
|
169 |
} |
170 |
|
171 |
/**
 * Formats one frame (header plus one line per atom, in global-index
 * order) and writes it to every stream in outFile.
 *
 * @param outFile     streams that receive the frame (dump and/or final)
 * @param currentTime simulation time stamped into the frame header
 *
 * Serial builds write atoms directly.  MPI builds gather everything to
 * node 0 using the per-node tag-counter ("potato") protocol described
 * in the block comment below: each atom travels as up to three
 * messages (type string, directional flag, coordinate data), each
 * tagged with that node's current counter value.
 */
void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){

  const int BUFFERSIZE = 2000;     // scratch space for one formatted line
  const int MINIBUFFERSIZE = 100;  // fixed length of the atom-type message

  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  int i, k;

#ifdef IS_MPI

  /*********************************************************************
   * Documentation?  You want DOCUMENTATION?
   *
   * Why all the potatoes below?
   *
   * To make a long story short, the original version of DumpWriter
   * worked in the most inefficient way possible.  Node 0 would
   * poke each of the node for an individual atom's formatted data
   * as node 0 worked its way down the global index. This was particularly
   * inefficient since the method blocked all processors at every atom
   * (and did it twice!).
   *
   * An intermediate version of DumpWriter could be described from Node
   * zero's perspective as follows:
   *
   *  1) Have 100 of your friends stand in a circle.
   *  2) When you say go, have all of them start tossing potatoes at
   *     you (one at a time).
   *  3) Catch the potatoes.
   *
   * It was an improvement, but MPI has buffers and caches that could
   * best be described in this analogy as "potato nets", so there's no
   * need to block the processors atom-by-atom.
   *
   * This new and improved DumpWriter works in an even more efficient
   * way:
   *
   *  1) Have 100 of your friend stand in a circle.
   *  2) When you say go, have them start tossing 5-pound bags of
   *     potatoes at you.
   *  3) Once you've caught a friend's bag of potatoes,
   *     toss them a spud to let them know they can toss another bag.
   *
   * How's THAT for documentation?
   *
   *********************************************************************/

  // potatoes[n] = next MPI tag to use with node n; incremented once per
  // message and wrapped to 0 before it can exceed the tag upper bound.
  int *potatoes;
  int myPotato;

  int nProc;
  int j, which_node, done, which_atom, local_index, currentIndex;
  double atomData6[6];    // pos[3] + vel[3] for a point atom
  double atomData13[13];  // pos + vel + quaternion q[4] + Jx,Jy,Jz
  int isDirectional;
  char* atomTypeString;
  char MPIatomTypeString[MINIBUFFERSIZE];

#else //is_mpi
  int nAtoms = entry_plug->n_atoms;
#endif //is_mpi

  double q[4];            // orientation quaternion scratch
  DirectionalAtom* dAtom;
  Atom** atoms = entry_plug->atoms;
  double pos[3], vel[3];

#ifndef IS_MPI

  // ---------- serial path: header then every atom, directly ----------
  for(k = 0; k < outFile.size(); k++){
    *outFile[k] << nAtoms << "\n";

    // Frame header: time, then the H (box) matrix written column by
    // column, columns separated by ';'.
    *outFile[k] << currentTime << ";\t"
                << entry_plug->Hmat[0][0] << "\t"
                << entry_plug->Hmat[1][0] << "\t"
                << entry_plug->Hmat[2][0] << ";\t"

                << entry_plug->Hmat[0][1] << "\t"
                << entry_plug->Hmat[1][1] << "\t"
                << entry_plug->Hmat[2][1] << ";\t"

                << entry_plug->Hmat[0][2] << "\t"
                << entry_plug->Hmat[1][2] << "\t"
                << entry_plug->Hmat[2][2] << ";";

    //write out additional parameters, such as chi and eta
    *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
  }

  for( i=0; i<nAtoms; i++ ){

    atoms[i]->getPos(pos);
    atoms[i]->getVel(vel);

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             pos[0],
             pos[1],
             pos[2],
             vel[0],
             vel[1],
             vel[2]);
    strcpy( writeLine, tempBuffer );

    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               dAtom->getJx(),
               dAtom->getJy(),
               dAtom->getJz());
      strcat( writeLine, tempBuffer );
    }
    else
      // Point atoms pad the orientation/angular-momentum columns with
      // zeros so every line has the same field count.
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    for(k = 0; k < outFile.size(); k++)
      *outFile[k] << writeLine;
  }

#else // is_mpi

  /* code to find maximum tag value */

  // MPI guarantees tags up to at least 32767; use the implementation's
  // advertised upper bound when it provides one.
  int *tagub, flag, MAXTAG;
  MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag);
  if (flag) {
    MAXTAG = *tagub;
  } else {
    MAXTAG = 32767;
  }

  int haveError;

  MPI_Status istatus;
  int *AtomToProcMap = mpiSim->getAtomToProcMap();

  // write out header and node 0's coordinates

  if( worldRank == 0 ){

    // Node 0 needs a list of the magic potatoes for each processor;

    nProc = mpiSim->getNumberProcessors();
    potatoes = new int[nProc];

    //write out the comment lines
    for (i = 0; i < nProc; i++)
      potatoes[i] = 0;

    for(k = 0; k < outFile.size(); k++){
      *outFile[k] << mpiSim->getTotAtoms() << "\n";

      *outFile[k] << currentTime << ";\t"
                  << entry_plug->Hmat[0][0] << "\t"
                  << entry_plug->Hmat[1][0] << "\t"
                  << entry_plug->Hmat[2][0] << ";\t"

                  << entry_plug->Hmat[0][1] << "\t"
                  << entry_plug->Hmat[1][1] << "\t"
                  << entry_plug->Hmat[2][1] << ";\t"

                  << entry_plug->Hmat[0][2] << "\t"
                  << entry_plug->Hmat[1][2] << "\t"
                  << entry_plug->Hmat[2][2] << ";";

      *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl;
    }

    currentIndex = 0;

    // Walk the GLOBAL index in order; each atom is either local to
    // node 0 or fetched from its owning node.
    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {

      // Get the Node number which has this atom;

      which_node = AtomToProcMap[i];

      if (which_node != 0) {

        if (potatoes[which_node] + 3 >= MAXTAG) {
          // The potato was going to exceed the maximum value,
          // so wrap this processor potato back to 0:

          potatoes[which_node] = 0;
          // Tag 0 is the out-of-band "reset" handshake; the remote node
          // blocks on it before reusing low tag values.
          MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD);

        }

        myPotato = potatoes[which_node];

        // Message 1 of 3: the atom's type string.
        MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node,
                 myPotato, MPI_COMM_WORLD, &istatus);

        atomTypeString = MPIatomTypeString;

        myPotato++;

        // Message 2 of 3: whether directional data follows.
        MPI_Recv(&isDirectional, 1, MPI_INT, which_node,
                 myPotato, MPI_COMM_WORLD, &istatus);

        myPotato++;

        // Message 3 of 3: 13 doubles for directional atoms, 6 otherwise.
        if (isDirectional) {
          MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node,
                   myPotato, MPI_COMM_WORLD, &istatus);
        } else {
          MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node,
                   myPotato, MPI_COMM_WORLD, &istatus);
        }

        myPotato++;
        potatoes[which_node] = myPotato;

      } else {

        // Atom i lives on node 0: pull it straight out of the sorted
        // index array (indexArray is ordered by global index, so local
        // atoms are consumed in the same order this loop visits them).
        haveError = 0;
        which_atom = i;

        //local_index = -1;

        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
        //}

        //if (local_index != -1) {

        local_index = indexArray[currentIndex].first;

        if (which_atom == indexArray[currentIndex].second) {

          atomTypeString = atoms[local_index]->getType();

          atoms[local_index]->getPos(pos);
          atoms[local_index]->getVel(vel);

          atomData6[0] = pos[0];
          atomData6[1] = pos[1];
          atomData6[2] = pos[2];

          atomData6[3] = vel[0];
          atomData6[4] = vel[1];
          atomData6[5] = vel[2];

          isDirectional = 0;

          if( atoms[local_index]->isDirectional() ){

            isDirectional = 1;

            dAtom = (DirectionalAtom *)atoms[local_index];
            dAtom->getQ( q );

            for (int j = 0; j < 6 ; j++)
              atomData13[j] = atomData6[j];

            atomData13[6] = q[0];
            atomData13[7] = q[1];
            atomData13[8] = q[2];
            atomData13[9] = q[3];

            atomData13[10] = dAtom->getJx();
            atomData13[11] = dAtom->getJy();
            atomData13[12] = dAtom->getJz();
          }

        } else {
          // Sorted-index bookkeeping is out of sync with the atom map.
          sprintf(painCave.errMsg,
                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
                  which_atom, worldRank, currentIndex, local_index );
          haveError= 1;
          simError();
        }

        if(haveError) DieDieDie();

        currentIndex++;
      }
      // If we've survived to here, format the line:

      if (!isDirectional) {

        sprintf( writeLine,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atomTypeString,
                 atomData6[0],
                 atomData6[1],
                 atomData6[2],
                 atomData6[3],
                 atomData6[4],
                 atomData6[5]);

        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      } else {

        sprintf( writeLine,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 atomTypeString,
                 atomData13[0],
                 atomData13[1],
                 atomData13[2],
                 atomData13[3],
                 atomData13[4],
                 atomData13[5],
                 atomData13[6],
                 atomData13[7],
                 atomData13[8],
                 atomData13[9],
                 atomData13[10],
                 atomData13[11],
                 atomData13[12]);

      }

      for(k = 0; k < outFile.size(); k++)
        *outFile[k] << writeLine;
    }

    for(k = 0; k < outFile.size(); k++)
      outFile[k]->flush();

    sprintf( checkPointMsg,
             "Sucessfully took a dump.\n");

    MPIcheckPoint();

    delete[] potatoes;

  } else {

    // worldRank != 0, so I'm a remote node.

    // Set my magic potato to 0:

    myPotato = 0;
    currentIndex = 0;

    for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {

      // Am I the node which has this atom?

      if (AtomToProcMap[i] == worldRank) {

        if (myPotato + 3 >= MAXTAG) {

          // The potato was going to exceed the maximum value,
          // so wrap this processor potato back to 0 (and block until
          // node 0 says we can go:

          MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus);

        }
        which_atom = i;

        //local_index = -1;

        //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
        //  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
        //}

        //if (local_index != -1) {

        local_index = indexArray[currentIndex].first;

        if (which_atom == indexArray[currentIndex].second) {

          atomTypeString = atoms[local_index]->getType();

          atoms[local_index]->getPos(pos);
          atoms[local_index]->getVel(vel);

          atomData6[0] = pos[0];
          atomData6[1] = pos[1];
          atomData6[2] = pos[2];

          atomData6[3] = vel[0];
          atomData6[4] = vel[1];
          atomData6[5] = vel[2];

          isDirectional = 0;

          if( atoms[local_index]->isDirectional() ){

            isDirectional = 1;

            dAtom = (DirectionalAtom *)atoms[local_index];
            dAtom->getQ( q );

            for (int j = 0; j < 6 ; j++)
              atomData13[j] = atomData6[j];

            atomData13[6] = q[0];
            atomData13[7] = q[1];
            atomData13[8] = q[2];
            atomData13[9] = q[3];

            atomData13[10] = dAtom->getJx();
            atomData13[11] = dAtom->getJy();
            atomData13[12] = dAtom->getJz();
          }

        } else {
          // NOTE(review): unlike the node-0 branch, haveError is set
          // but never checked here, so the send sequence below still
          // runs after a mismatch — confirm whether that is intended.
          sprintf(painCave.errMsg,
                  "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n",
                  which_atom, worldRank, currentIndex, local_index );
          haveError= 1;
          simError();
        }

        strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE);

        // null terminate the string before sending (just in case):
        MPIatomTypeString[MINIBUFFERSIZE-1] = '\0';

        // Mirror of node 0's three-message receive sequence, using the
        // same incrementing tag.
        MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0,
                 myPotato, MPI_COMM_WORLD);

        myPotato++;

        MPI_Send(&isDirectional, 1, MPI_INT, 0,
                 myPotato, MPI_COMM_WORLD);

        myPotato++;

        if (isDirectional) {

          MPI_Send(atomData13, 13, MPI_DOUBLE, 0,
                   myPotato, MPI_COMM_WORLD);

        } else {

          MPI_Send(atomData6, 6, MPI_DOUBLE, 0,
                   myPotato, MPI_COMM_WORLD);
        }

        myPotato++;
        currentIndex++;
      }
    }

    sprintf( checkPointMsg,
             "Sucessfully took a dump.\n");
    MPIcheckPoint();

  }

#endif // is_mpi
}
629 |
|
630 |
#ifdef IS_MPI |
631 |
|
632 |
// a couple of functions to let us escape the write loop

// Hard-aborts the whole parallel run: shuts MPI down and terminates
// the process.  Called when a node detects an unrecoverable index
// mismatch during a frame write.
// NOTE(review): exits with status 0 even though this is an error
// path — confirm whether a nonzero exit code is expected.
void dWrite::DieDieDie( void ){

  MPI_Finalize();
  exit (0);
}
639 |
|
640 |
#endif //is_mpi |