#include <cstring>
#include <iostream>
#include <fstream>

#ifdef IS_MPI
#include <mpi.h>
#include "mpiSimulation.hpp"
#define TAKE_THIS_TAG 0
#endif // is_mpi

#include "ReadWrite.hpp"
#include "simError.h"

DumpWriter::DumpWriter( SimInfo* the_entry_plug ){

#ifdef IS_MPI
  if( worldRank == 0 ){
#endif // is_mpi

    entry_plug = the_entry_plug;

    strcpy( outName, entry_plug->sampleName );

    std::cerr << "Opening " << outName << " for dumping.\n";

    outFile.open( outName, ios::out | ios::trunc );

    if( !outFile ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for dump output.\n",
               outName );
      painCave.isFatal = 1;
      simError();
    }

    //outFile.setf( ios::scientific );

#ifdef IS_MPI
  }

  sprintf( checkPointMsg,
           "Successfully opened output file for dumping.\n" );
  MPIcheckPoint();
#endif // is_mpi
}
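
// Only node 0 opened the dump file, so only node 0 closes it.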
DumpWriter::~DumpWriter( ){

#ifdef IS_MPI
  if( worldRank == 0 ){
#endif // is_mpi

    outFile.close();

#ifdef IS_MPI
  }
#endif // is_mpi
}
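
// writeDump appends one frame to the sample file. The frame format is:
//   line 1: number of atoms
//   line 2: current time and the box dimensions (x, y, z)
//   then one tab-separated line per atom: type, position, velocity, followed
//   by a quaternion and angular momentum for directional atoms (zeros
//   otherwise).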

void DumpWriter::writeDump( double currentTime ){

  const int BUFFERSIZE = 2000;
  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  int i;
  double q[4];
  DirectionalAtom* dAtom;
  int nAtoms = entry_plug->n_atoms;
  Atom** atoms = entry_plug->atoms;

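  // Single-processor build: this node owns every atom and writes the frame
  // directly.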
#ifndef IS_MPI

  outFile << nAtoms << "\n";

  outFile << currentTime << "\t"
          << entry_plug->box_x << "\t"
          << entry_plug->box_y << "\t"
          << entry_plug->box_z << "\n";

  for( i=0; i<nAtoms; i++ ){

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             atoms[i]->getX(),
             atoms[i]->getY(),
             atoms[i]->getZ(),
             atoms[i]->get_vx(),
             atoms[i]->get_vy(),
             atoms[i]->get_vz() );
    strcpy( writeLine, tempBuffer );

    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               dAtom->getJx(),
               dAtom->getJy(),
               dAtom->getJz() );
      strcat( writeLine, tempBuffer );
    }
    else
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    outFile << writeLine;
  }
  outFile.flush();

#else // is_mpi

  int masterIndex;
  int nodeAtomsStart;
  int nodeAtomsEnd;
  int mpiErr;
  int sendError;
  int procIndex;

  MPI_Status istatus;

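  // Gather protocol: node 0 writes the header and its own atoms first, then
  // for each remaining rank it receives that rank's atom index range, checks
  // it against the running master index, acknowledges, and streams the
  // rank's formatted lines into the file.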

  // write out header and node 0's coordinates

  if( worldRank == 0 ){
    outFile << mpiSim->getTotAtoms() << "\n";

    outFile << currentTime << "\t"
            << entry_plug->box_x << "\t"
            << entry_plug->box_y << "\t"
            << entry_plug->box_z << "\n";

    masterIndex = 0;
    for( i=0; i<nAtoms; i++ ){

      sprintf( tempBuffer,
               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
               atoms[i]->getType(),
               atoms[i]->getX(),
               atoms[i]->getY(),
               atoms[i]->getZ(),
               atoms[i]->get_vx(),
               atoms[i]->get_vy(),
               atoms[i]->get_vz() );
      strcpy( writeLine, tempBuffer );

      if( atoms[i]->isDirectional() ){

        dAtom = (DirectionalAtom *)atoms[i];
        dAtom->getQ( q );

        sprintf( tempBuffer,
                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 q[0],
                 q[1],
                 q[2],
                 q[3],
                 dAtom->getJx(),
                 dAtom->getJy(),
                 dAtom->getJz() );
        strcat( writeLine, tempBuffer );
      }
      else
        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      outFile << writeLine;
      masterIndex++;
    }
    outFile.flush();
  }

  sprintf( checkPointMsg,
           "Successfully wrote node 0's dump configuration.\n" );
  MPIcheckPoint();

  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
       procIndex++){

    if( worldRank == 0 ){

      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      // Make sure the index node 0 is writing to matches where the
      // sending node expects it to be.

      if (masterIndex != nodeAtomsStart){
        sendError = 1;
        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
        sprintf(painCave.errMsg,
                "DumpWriter error: atoms start index (%d) for "
                "node %d not equal to master index (%d)",
                nodeAtomsStart,procIndex,masterIndex );
        painCave.isFatal = 1;
        simError();
      }

      sendError = 0;
      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      // receive the node's writeLines

      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){

        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
                          TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus );

        outFile << writeLine;
        masterIndex++;
      }
    }

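    // Worker ranks: report this node's atom range, wait for node 0's
    // go-ahead, then send each formatted line in order.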
    else if( worldRank == procIndex ){

      nodeAtomsStart = mpiSim->getMyAtomStart();
      nodeAtomsEnd = mpiSim->getMyAtomEnd();

      fprintf( stderr,
               "node %d: myatomStart-> %d; myatomEnd-> %d\n",
               worldRank, nodeAtomsStart, nodeAtomsEnd );

      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);
      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      fprintf( stderr, "node %d: sent off the start and end\n", worldRank );

      sendError = -1;
      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD, &istatus);

      fprintf( stderr, "node %d: value of sendError is %d\n",
               worldRank, sendError );

      if (sendError) MPIcheckPoint();

      // send current node's configuration line by line.

      for( i=0; i<nAtoms; i++ ){

        sprintf( tempBuffer,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atoms[i]->getType(),
                 atoms[i]->getX(),
                 atoms[i]->getY(),
                 atoms[i]->getZ(),
                 atoms[i]->get_vx(),
                 atoms[i]->get_vy(),
                 atoms[i]->get_vz() );
        strcpy( writeLine, tempBuffer );

        if( atoms[i]->isDirectional() ){

          dAtom = (DirectionalAtom *)atoms[i];
          dAtom->getQ( q );

          sprintf( tempBuffer,
                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                   q[0],
                   q[1],
                   q[2],
                   q[3],
                   dAtom->getJx(),
                   dAtom->getJy(),
                   dAtom->getJz() );
          strcat( writeLine, tempBuffer );
        }
        else
          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

        fprintf( stderr,
                 "node %d: I'm sending the line:\n->%s\n",
                 worldRank, writeLine );

        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
      }
    }

    sprintf(checkPointMsg,"Node %d sent dump configuration.",
            procIndex);
    MPIcheckPoint();
  }

#endif // is_mpi
}
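
// writeFinal writes the last configuration to the final-output file. The
// per-atom format matches writeDump(), but the header carries only the box
// dimensions, with no time stamp.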

void DumpWriter::writeFinal(){

  const int BUFFERSIZE = 2000;
  char tempBuffer[BUFFERSIZE];
  char writeLine[BUFFERSIZE];

  char finalName[500];

  int i;
  double q[4];
  DirectionalAtom* dAtom;
  int nAtoms = entry_plug->n_atoms;
  Atom** atoms = entry_plug->atoms;

  ofstream finalOut;

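  // Under MPI only node 0 opens the file; the other ranks just synchronize
  // at the checkpoint below.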
#ifdef IS_MPI
  if( worldRank == 0 ){
#endif // is_mpi

    strcpy( finalName, entry_plug->finalName );

    finalOut.open( finalName, ios::out | ios::trunc );
    if( !finalOut ){
      sprintf( painCave.errMsg,
               "Could not open \"%s\" for final dump output.\n",
               finalName );
      painCave.isFatal = 1;
      simError();
    }

    // finalOut.setf( ios::scientific );

#ifdef IS_MPI
  }

  sprintf(checkPointMsg,"Opened file for final configuration\n");
  MPIcheckPoint();

#endif // is_mpi

#ifndef IS_MPI

  finalOut << nAtoms << "\n";

  finalOut << entry_plug->box_x << "\t"
           << entry_plug->box_y << "\t"
           << entry_plug->box_z << "\n";

  for( i=0; i<nAtoms; i++ ){

    sprintf( tempBuffer,
             "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
             atoms[i]->getType(),
             atoms[i]->getX(),
             atoms[i]->getY(),
             atoms[i]->getZ(),
             atoms[i]->get_vx(),
             atoms[i]->get_vy(),
             atoms[i]->get_vz() );
    strcpy( writeLine, tempBuffer );

    if( atoms[i]->isDirectional() ){

      dAtom = (DirectionalAtom *)atoms[i];
      dAtom->getQ( q );

      sprintf( tempBuffer,
               "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
               q[0],
               q[1],
               q[2],
               q[3],
               dAtom->getJx(),
               dAtom->getJy(),
               dAtom->getJz() );
      strcat( writeLine, tempBuffer );
    }
    else
      strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

    finalOut << writeLine;
  }
  finalOut.flush();

#else // is_mpi

  int masterIndex;
  int nodeAtomsStart;
  int nodeAtomsEnd;
  int mpiErr;
  int sendError;
  int procIndex;

  MPI_Status istatus;

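  // The gather protocol mirrors writeDump(): node 0 writes its own atoms,
  // then collects each remaining rank's lines in rank order.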
  // write out header and node 0's coordinates

  if( worldRank == 0 ){
    finalOut << mpiSim->getTotAtoms() << "\n";

    finalOut << entry_plug->box_x << "\t"
             << entry_plug->box_y << "\t"
             << entry_plug->box_z << "\n";

    masterIndex = 0;

    std::cerr << "about to write node 0 atoms. nAtoms = " << nAtoms << "\n";

    for( i=0; i<nAtoms; i++ ){

      sprintf( tempBuffer,
               "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
               atoms[i]->getType(),
               atoms[i]->getX(),
               atoms[i]->getY(),
               atoms[i]->getZ(),
               atoms[i]->get_vx(),
               atoms[i]->get_vy(),
               atoms[i]->get_vz() );
      strcpy( writeLine, tempBuffer );

      if( atoms[i]->isDirectional() ){

        dAtom = (DirectionalAtom *)atoms[i];
        dAtom->getQ( q );

        sprintf( tempBuffer,
                 "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                 q[0],
                 q[1],
                 q[2],
                 q[3],
                 dAtom->getJx(),
                 dAtom->getJy(),
                 dAtom->getJz() );
        strcat( writeLine, tempBuffer );
      }
      else
        strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

      finalOut << writeLine;
      masterIndex++;
    }
    finalOut.flush();
  }

  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
       procIndex++){

    if( worldRank == 0 ){

      mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
                        TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus);

      // Make sure the index node 0 is writing to matches where the
      // sending node expects it to be.

      if (masterIndex != nodeAtomsStart){
        sendError = 1;
        mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
        sprintf(painCave.errMsg,
                "DumpWriter error: atoms start index (%d) for "
                "node %d not equal to master index (%d)",
                nodeAtomsStart,procIndex,masterIndex );
        painCave.isFatal = 1;
        simError();
      }

      sendError = 0;
      mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      // receive the node's writeLines

      for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){

        mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
                          TAKE_THIS_TAG,MPI_COMM_WORLD,&istatus );

        finalOut << writeLine;
        masterIndex++;
      }

      finalOut.flush();
    }

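    // Worker ranks follow the same handshake as in writeDump() before
    // streaming their lines.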
    else if( worldRank == procIndex ){

      nodeAtomsStart = mpiSim->getMyAtomStart();
      nodeAtomsEnd = mpiSim->getMyAtomEnd();

      mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);
      mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD);

      mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
                        MPI_COMM_WORLD, &istatus);
      if (sendError) MPIcheckPoint();

      // send current node's configuration line by line.

      for( i=0; i<nAtoms; i++ ){

        sprintf( tempBuffer,
                 "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
                 atoms[i]->getType(),
                 atoms[i]->getX(),
                 atoms[i]->getY(),
                 atoms[i]->getZ(),
                 atoms[i]->get_vx(),
                 atoms[i]->get_vy(),
                 atoms[i]->get_vz() );
        strcpy( writeLine, tempBuffer );

        if( atoms[i]->isDirectional() ){

          dAtom = (DirectionalAtom *)atoms[i];
          dAtom->getQ( q );

          sprintf( tempBuffer,
                   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
                   q[0],
                   q[1],
                   q[2],
                   q[3],
                   dAtom->getJx(),
                   dAtom->getJy(),
                   dAtom->getJz() );
          strcat( writeLine, tempBuffer );
        }
        else
          strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );

        mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
                          MPI_COMM_WORLD);
      }
    }

    sprintf(checkPointMsg,"Node %d sent dump configuration.",
            procIndex);
    MPIcheckPoint();
  }

  if( worldRank == 0 ) finalOut.close();

#endif // is_mpi
}