# | Line 11 | Line 11 | |
---|---|---|
11 | #include "ReadWrite.hpp" | |
12 | #include "simError.h" | |
13 | ||
14 | + | #define GAME_OVER -1 |
15 | ||
15 | – | |
16 | – | |
17 | – | |
16 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
17 | ||
18 | entry_plug = the_entry_plug; | |
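Note on the new `GAME_OVER` macro above: it is an in-band sentinel. Valid atom indices are never negative, so a worker node that receives -1 on the index channel knows the master has no more format requests and can leave its receive loop. An out-of-band alternative would be a dedicated shutdown tag; a two-line sketch with assumed tag values (not from this file):

```cpp
// Hypothetical alternative: signal shutdown with a second MPI tag instead of
// a magic index value (both tag numbers here are assumed, not from the source).
const int ATOM_REQUEST_TAG = 100;
const int SHUTDOWN_TAG     = 101;
```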
# | Line 122 | Line 120 | void DumpWriter::writeDump( double currentTime ){ | |
120 | outFile.flush(); | |
121 | ||
122 | #else // is_mpi | |
123 | < | |
126 | < | int masterIndex; |
127 | < | int nodeAtomsStart; |
128 | < | int nodeAtomsEnd; |
129 | < | int mpiErr; |
130 | < | int sendError; |
131 | < | int procIndex; |
132 | < | |
123 | > | |
124 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
124 | > | MPI::Status istatus; |
125 | < | |
135 | < | |
125 | > | |
126 | // write out header and node 0's coordinates | |
127 | < | |
127 | > | |
128 | if( worldRank == 0 ){ | |
129 | outFile << mpiSim->getTotAtoms() << "\n"; | |
130 | < | |
130 | > | |
131 | outFile << currentTime << "\t" | |
132 | << entry_plug->box_x << "\t" | |
133 | << entry_plug->box_y << "\t" | |
134 | << entry_plug->box_z << "\n"; | |
135 | < | |
136 | < | masterIndex = 0; |
137 | < | for( i=0; i<nAtoms; i++ ){ |
135 | > | |
136 | > | for (i = 0 ; i < mpiPlug->nAtomsGlobal; i++ ) { |
137 | > | // Get the node number which owns this atom: |
138 | ||
139 | < | sprintf( tempBuffer, |
140 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
141 | < | atoms[i]->getType(), |
142 | < | atoms[i]->getX(), |
143 | < | atoms[i]->getY(), |
144 | < | atoms[i]->getZ(), |
145 | < | atoms[i]->get_vx(), |
146 | < | atoms[i]->get_vy(), |
147 | < | atoms[i]->get_vz()); |
148 | < | strcpy( writeLine, tempBuffer ); |
139 | > | which_node = AtomToProcMap[i]; |
140 | > | |
141 | > | if (which_node == mpiPlug->myNode) { |
142 | > | |
143 | > | sprintf( tempBuffer, |
144 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
145 | > | atoms[i]->getType(), |
146 | > | atoms[i]->getX(), |
147 | > | atoms[i]->getY(), |
148 | > | atoms[i]->getZ(), |
149 | > | atoms[i]->get_vx(), |
150 | > | atoms[i]->get_vy(), |
151 | > | atoms[i]->get_vz()); |
152 | > | strcpy( writeLine, tempBuffer ); |
153 | ||
154 | < | if( atoms[i]->isDirectional() ){ |
154 | > | if( atoms[i]->isDirectional() ){ |
155 | ||
156 | < | dAtom = (DirectionalAtom *)atoms[i]; |
157 | < | dAtom->getQ( q ); |
156 | > | dAtom = (DirectionalAtom *)atoms[i]; |
157 | > | dAtom->getQ( q ); |
158 | ||
159 | < | sprintf( tempBuffer, |
160 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
161 | < | q[0], |
162 | < | q[1], |
163 | < | q[2], |
164 | < | q[3], |
165 | < | dAtom->getJx(), |
166 | < | dAtom->getJy(), |
167 | < | dAtom->getJz()); |
168 | < | strcat( writeLine, tempBuffer ); |
159 | > | sprintf( tempBuffer, |
160 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
161 | > | q[0], |
162 | > | q[1], |
163 | > | q[2], |
164 | > | q[3], |
165 | > | dAtom->getJx(), |
166 | > | dAtom->getJy(), |
167 | > | dAtom->getJz()); |
168 | > | strcat( writeLine, tempBuffer ); |
169 | > | } |
170 | > | else |
171 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
172 | > | |
173 | > | } else { |
174 | > | |
175 | > | MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG); |
176 | > | MPI::COMM_WORLD.Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
177 | > | TAKE_THIS_TAG, istatus); |
178 | } | |
179 | < | else |
177 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
178 | < | |
179 | > | |
180 | outFile << writeLine; | |
180 | – | masterIndex++; |
181 | } | |
182 | – | outFile.flush(); |
183 | – | } |
184 | – | |
185 | – | sprintf( checkPointMsg, |
186 | – | "Sucessfully wrote node 0's dump configuration.\n"); |
187 | – | MPIcheckPoint(); |
182 | ||
183 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
184 | < | procIndex++){ |
185 | < | |
192 | < | if( worldRank == 0 ){ |
193 | < | |
194 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
195 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
196 | < | |
197 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
198 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
199 | < | |
200 | < | // Make sure where node 0 is writing to, matches where the |
201 | < | // receiving node expects it to be. |
202 | < | |
203 | < | if (masterIndex != nodeAtomsStart){ |
204 | < | sendError = 1; |
205 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
206 | < | MPI_COMM_WORLD); |
207 | < | sprintf(painCave.errMsg, |
208 | < | "DumpWriter error: atoms start index (%d) for " |
209 | < | "node %d not equal to master index (%d)", |
210 | < | nodeAtomsStart,procIndex,masterIndex ); |
211 | < | painCave.isFatal = 1; |
212 | < | simError(); |
213 | < | } |
214 | < | |
215 | < | sendError = 0; |
216 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
217 | < | MPI_COMM_WORLD); |
218 | < | |
219 | < | // recieve the nodes writeLines |
220 | < | |
221 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
222 | < | |
223 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
224 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
225 | < | |
226 | < | outFile << writeLine; |
227 | < | masterIndex++; |
228 | < | } |
183 | > | // release the worker nodes; node 0 never posts a matching receive: |
184 | > | int game_over = GAME_OVER; |
185 | > | for (j = 1; j < mpiPlug->numberProcessors; j++) { |
186 | > | MPI::COMM_WORLD.Send(&game_over, 1, MPI_INT, j, TAKE_THIS_TAG); |
186 | } | |
187 | ||
188 | < | else if( worldRank == procIndex ){ |
188 | > | } else { |
189 | > | |
190 | > | done = 0; |
191 | > | while (!done) { |
192 | > | MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0, |
193 | > | TAKE_THIS_TAG, istatus); |
194 | ||
195 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
196 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
195 | > | if (which_atom == GAME_OVER) { |
196 | > | done = 1; |
197 | > | continue; |
198 | > | } else { |
199 | ||
200 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
237 | < | MPI_COMM_WORLD); |
238 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
239 | < | MPI_COMM_WORLD); |
240 | < | |
241 | < | sendError = -1; |
242 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
243 | < | MPI_COMM_WORLD, istatus); |
244 | < | |
245 | < | if (sendError) MPIcheckPoint(); |
246 | < | |
247 | < | // send current node's configuration line by line. |
248 | < | |
249 | < | for( i=0; i<nAtoms; i++ ){ |
250 | < | |
200 | > | //format the line |
201 | sprintf( tempBuffer, | |
202 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
203 | < | atoms[i]->getType(), |
204 | < | atoms[i]->getX(), |
205 | < | atoms[i]->getY(), |
206 | < | atoms[i]->getZ(), |
207 | < | atoms[i]->get_vx(), |
208 | < | atoms[i]->get_vy(), |
209 | < | atoms[i]->get_vz()); // check here. |
203 | > | atoms[which_atom]->getType(), |
204 | > | atoms[which_atom]->getX(), |
205 | > | atoms[which_atom]->getY(), |
206 | > | atoms[which_atom]->getZ(), |
207 | > | atoms[which_atom]->get_vx(), |
208 | > | atoms[which_atom]->get_vy(), |
209 | > | atoms[which_atom]->get_vz()); // NB: which_atom is a global index |
210 | strcpy( writeLine, tempBuffer ); | |
211 | ||
212 | < | if( atoms[i]->isDirectional() ){ |
212 | > | if( atoms[which_atom]->isDirectional() ){ |
213 | ||
214 | < | dAtom = (DirectionalAtom *)atoms[i]; |
214 | > | dAtom = (DirectionalAtom *)atoms[which_atom]; |
215 | dAtom->getQ( q ); | |
216 | ||
217 | sprintf( tempBuffer, | |
# | Line 277 | Line 227 | void DumpWriter::writeDump( double currentTime ){ | |
227 | } | |
228 | else | |
229 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
230 | < | |
231 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
232 | < | MPI_COMM_WORLD); |
230 | > | |
231 | > | MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
232 | > | TAKE_THIS_TAG); |
233 | } | |
234 | } | |
235 | < | |
236 | < | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
237 | < | procIndex); |
238 | < | MPIcheckPoint(); |
239 | < | } |
290 | < | |
235 | > | } |
236 | > | outFile.flush(); |
237 | > | sprintf( checkPointMsg, |
238 | > | "Sucessfully took a dump.\n"); |
239 | > | MPIcheckPoint(); |
240 | #endif // is_mpi | |
241 | } | |
242 | ||
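Taken together, the writeDump hunks above replace the old block-partitioned exchange, where each node shipped its contiguous [nodeAtomsStart, nodeAtomsEnd] range in order, with an on-demand master/worker protocol: node 0 walks the global atom list, formats its own atoms, requests one line at a time from each remote owner via AtomToProcMap, and finally broadcasts the GAME_OVER sentinel. The sketch below restates that protocol in condensed form under stated assumptions: formatLine() is a hypothetical stand-in for the sprintf block, BUFFERSIZE and TAKE_THIS_TAG are given assumed values, and the C MPI API is used because the MPI:: C++ bindings shown in the diff were deprecated in MPI-2.2 and removed in MPI-3.0.

```cpp
#include <mpi.h>
#include <cstdio>
#include <ostream>

const int BUFFERSIZE    = 2000;  // matches the buffer size used elsewhere in the file
const int TAKE_THIS_TAG = 0;     // assumed value; the real tag is defined elsewhere
const int GAME_OVER     = -1;    // sentinel index: "no more format requests"

// Hypothetical stand-in for the sprintf block in DumpWriter.
void formatLine(int atomIndex, char* line) {
  std::snprintf(line, BUFFERSIZE, "%d\n", atomIndex);
}

// One frame: node 0 writes every line; the other nodes answer format requests.
void writeFrame(std::ostream& out, int nAtomsGlobal, int myNode, int nProcs,
                const int* atomToProcMap) {
  char line[BUFFERSIZE];
  MPI_Status status;

  if (myNode == 0) {
    for (int i = 0; i < nAtomsGlobal; i++) {
      int owner = atomToProcMap[i];
      if (owner == 0) {
        formatLine(i, line);                   // master owns this atom
      } else {                                 // ask the owning node for the line
        MPI_Send(&i, 1, MPI_INT, owner, TAKE_THIS_TAG, MPI_COMM_WORLD);
        MPI_Recv(line, BUFFERSIZE, MPI_CHAR, owner, TAKE_THIS_TAG,
                 MPI_COMM_WORLD, &status);
      }
      out << line;                             // output stays in global atom order
    }
    int stop = GAME_OVER;                      // release the workers
    for (int j = 1; j < nProcs; j++)
      MPI_Send(&stop, 1, MPI_INT, j, TAKE_THIS_TAG, MPI_COMM_WORLD);
  } else {
    for (;;) {
      int which;
      MPI_Recv(&which, 1, MPI_INT, 0, TAKE_THIS_TAG, MPI_COMM_WORLD, &status);
      if (which == GAME_OVER) break;           // master is done with this frame
      formatLine(which, line);
      MPI_Send(line, BUFFERSIZE, MPI_CHAR, 0, TAKE_THIS_TAG, MPI_COMM_WORLD);
    }
  }
}
```

Two caveats about the revision itself. First, the per-atom round trip serializes all I/O through node 0; batching indices or gathering preformatted lines would cut the message count, though the point of this change appears to be correctness for non-contiguous atom distributions, not speed. Second, the worker branch indexes the local atoms array with the global index it receives (atoms[which_atom]), which is only safe if every node can address the full atom array; if atoms holds only node-local atoms, a global-to-local index translation is still missing here.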
294 | – | |
295 | – | |
243 | void DumpWriter::writeFinal(){ | |
297 | – | |
298 | – | |
299 | – | const int BUFFERSIZE = 2000; |
300 | – | char tempBuffer[500]; |
301 | – | char writeLine[BUFFERSIZE]; |
244 | ||
245 | char finalName[500]; | |
304 | – | |
305 | – | int i; |
306 | – | double q[4]; |
307 | – | DirectionalAtom* dAtom; |
308 | – | int nAtoms = entry_plug->n_atoms; |
309 | – | Atom** atoms = entry_plug->atoms; |
310 | – | |
246 | ofstream finalOut; | |
247 | ||
248 | #ifdef IS_MPI | |
# | Line 335 | Line 270 | void DumpWriter::writeFinal(){ | |
270 | ||
271 | #endif //is_mpi | |
272 | ||
273 | < | |
339 | < | |
273 | > | |
274 | #ifndef IS_MPI | |
275 | ||
276 | finalOut << nAtoms << "\n"; | |
# | Line 380 | Line 314 | void DumpWriter::writeFinal(){ | |
314 | finalOut << writeLine; | |
315 | } | |
316 | finalOut.flush(); | |
317 | + | finalOut.close(); |
318 | ||
319 | #else // is_mpi | |
320 | < | |
386 | < | int masterIndex; |
387 | < | int nodeAtomsStart; |
388 | < | int nodeAtomsEnd; |
389 | < | int mpiErr; |
390 | < | int sendError; |
391 | < | int procIndex; |
392 | < | |
320 | > | |
321 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
321 | > | MPI::Status istatus; |
322 | < | |
395 | < | |
322 | > | |
323 | // write out header and node 0's coordinates | |
324 | < | |
324 | > | |
325 | if( worldRank == 0 ){ | |
326 | finalOut << mpiSim->getTotAtoms() << "\n"; | |
327 | < | |
327 | > | |
328 | finalOut << entry_plug->box_x << "\t" | |
329 | < | << entry_plug->box_y << "\t" |
330 | < | << entry_plug->box_z << "\n"; |
329 | > | << entry_plug->box_y << "\t" |
330 | > | << entry_plug->box_z << "\n"; |
331 | ||
332 | < | masterIndex = 0; |
333 | < | |
407 | < | for( i=0; i<nAtoms; i++ ){ |
332 | > | for (i = 0 ; i < mpiPlug->nAtomsGlobal; i++ ) { |
333 | > | // Get the node number which owns this atom: |
334 | ||
335 | < | sprintf( tempBuffer, |
336 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
337 | < | atoms[i]->getType(), |
338 | < | atoms[i]->getX(), |
339 | < | atoms[i]->getY(), |
340 | < | atoms[i]->getZ(), |
341 | < | atoms[i]->get_vx(), |
342 | < | atoms[i]->get_vy(), |
343 | < | atoms[i]->get_vz()); |
344 | < | strcpy( writeLine, tempBuffer ); |
335 | > | which_node = AtomToProcMap[i]; |
336 | > | |
337 | > | if (which_node == mpiPlug->myNode) { |
338 | > | |
339 | > | sprintf( tempBuffer, |
340 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
341 | > | atoms[i]->getType(), |
342 | > | atoms[i]->getX(), |
343 | > | atoms[i]->getY(), |
344 | > | atoms[i]->getZ(), |
345 | > | atoms[i]->get_vx(), |
346 | > | atoms[i]->get_vy(), |
347 | > | atoms[i]->get_vz()); |
348 | > | strcpy( writeLine, tempBuffer ); |
349 | ||
350 | < | if( atoms[i]->isDirectional() ){ |
350 | > | if( atoms[i]->isDirectional() ){ |
351 | ||
352 | < | dAtom = (DirectionalAtom *)atoms[i]; |
353 | < | dAtom->getQ( q ); |
352 | > | dAtom = (DirectionalAtom *)atoms[i]; |
353 | > | dAtom->getQ( q ); |
354 | ||
355 | < | sprintf( tempBuffer, |
356 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
357 | < | q[0], |
358 | < | q[1], |
359 | < | q[2], |
360 | < | q[3], |
361 | < | dAtom->getJx(), |
362 | < | dAtom->getJy(), |
363 | < | dAtom->getJz()); |
364 | < | strcat( writeLine, tempBuffer ); |
355 | > | sprintf( tempBuffer, |
356 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
357 | > | q[0], |
358 | > | q[1], |
359 | > | q[2], |
360 | > | q[3], |
361 | > | dAtom->getJx(), |
362 | > | dAtom->getJy(), |
363 | > | dAtom->getJz()); |
364 | > | strcat( writeLine, tempBuffer ); |
365 | > | } |
366 | > | else |
367 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
368 | > | |
369 | > | } else { |
370 | > | |
371 | > | MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG); |
372 | > | MPI::COMM_WORLD.Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
373 | > | TAKE_THIS_TAG, istatus); |
374 | } | |
375 | < | else |
437 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
438 | < | |
375 | > | |
376 | finalOut << writeLine; | |
440 | – | masterIndex++; |
377 | } | |
442 | – | finalOut.flush(); |
443 | – | } |
378 | ||
379 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
380 | < | procIndex++){ |
381 | < | |
448 | < | if( worldRank == 0 ){ |
449 | < | |
450 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
451 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
452 | < | |
453 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
454 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
455 | < | |
456 | < | // Make sure where node 0 is writing to, matches where the |
457 | < | // receiving node expects it to be. |
458 | < | |
459 | < | if (masterIndex != nodeAtomsStart){ |
460 | < | sendError = 1; |
461 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
462 | < | MPI_COMM_WORLD); |
463 | < | sprintf(painCave.errMsg, |
464 | < | "DumpWriter error: atoms start index (%d) for " |
465 | < | "node %d not equal to master index (%d)", |
466 | < | nodeAtomsStart,procIndex,masterIndex ); |
467 | < | painCave.isFatal = 1; |
468 | < | simError(); |
469 | < | } |
470 | < | |
471 | < | sendError = 0; |
472 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
473 | < | MPI_COMM_WORLD); |
474 | < | |
475 | < | // recieve the nodes writeLines |
476 | < | |
477 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
478 | < | |
479 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
480 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
481 | < | |
482 | < | finalOut << writeLine; |
483 | < | masterIndex++; |
484 | < | } |
485 | < | |
486 | < | finalOut.flush(); |
379 | > | // release the worker nodes; node 0 never posts a matching receive: |
380 | > | int game_over = GAME_OVER; |
381 | > | for (j = 1; j < mpiPlug->numberProcessors; j++) { |
382 | > | MPI::COMM_WORLD.Send(&game_over, 1, MPI_INT, j, TAKE_THIS_TAG); |
382 | } | |
383 | ||
384 | < | else if( worldRank == procIndex ){ |
384 | > | } else { |
385 | > | |
386 | > | done = 0; |
387 | > | while (!done) { |
388 | > | MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0, |
389 | > | TAKE_THIS_TAG, istatus); |
390 | ||
391 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
392 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
393 | < | |
394 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
495 | < | MPI_COMM_WORLD); |
496 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
497 | < | MPI_COMM_WORLD); |
498 | < | |
499 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
500 | < | MPI_COMM_WORLD, istatus); |
501 | < | if (sendError) MPIcheckPoint(); |
391 | > | if (which_atom == GAME_OVER) { |
392 | > | done = 1; |
393 | > | continue; |
394 | > | } else { |
395 | ||
396 | < | // send current node's configuration line by line. |
504 | < | |
505 | < | for( i=0; i<nAtoms; i++ ){ |
506 | < | |
396 | > | //format the line |
397 | sprintf( tempBuffer, | |
398 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
399 | < | atoms[i]->getType(), |
400 | < | atoms[i]->getX(), |
401 | < | atoms[i]->getY(), |
402 | < | atoms[i]->getZ(), |
403 | < | atoms[i]->get_vx(), |
404 | < | atoms[i]->get_vy(), |
405 | < | atoms[i]->get_vz()); |
399 | > | atoms[which_atom]->getType(), |
400 | > | atoms[which_atom]->getX(), |
401 | > | atoms[which_atom]->getY(), |
402 | > | atoms[which_atom]->getZ(), |
403 | > | atoms[which_atom]->get_vx(), |
404 | > | atoms[which_atom]->get_vy(), |
405 | > | atoms[which_atom]->get_vz()); // NB: which_atom is a global index |
406 | strcpy( writeLine, tempBuffer ); | |
407 | ||
408 | < | if( atoms[i]->isDirectional() ){ |
408 | > | if( atoms[which_atom]->isDirectional() ){ |
409 | ||
410 | < | dAtom = (DirectionalAtom *)atoms[i]; |
410 | > | dAtom = (DirectionalAtom *)atoms[which_atom]; |
411 | dAtom->getQ( q ); | |
412 | ||
413 | sprintf( tempBuffer, | |
# | Line 533 | Line 423 | void DumpWriter::writeFinal(){ | |
423 | } | |
424 | else | |
425 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
426 | < | |
427 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
428 | < | MPI_COMM_WORLD); |
426 | > | |
427 | > | MPI::COMM_WORLD.Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
428 | > | TAKE_THIS_TAG); |
429 | } | |
430 | } | |
431 | < | |
432 | < | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
433 | < | procIndex); |
434 | < | MPIcheckPoint(); |
435 | < | } |
431 | > | } |
432 | > | finalOut.flush(); |
433 | > | sprintf( checkPointMsg, |
434 | > | "Sucessfully took a dump.\n"); |
435 | > | MPIcheckPoint(); |
436 | ||
437 | < | if( worldRank == 0 ) finalOut.close(); |
548 | < | |
549 | < | |
437 | > | if( worldRank == 0 ) finalOut.close(); |
438 | #endif // is_mpi | |
439 | } |
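A closing observation: writeFinal now repeats writeDump's master/worker loop almost line for line, differing only in the header (no time stamp) and the target stream, and this revision also deletes writeFinal's local declarations (BUFFERSIZE, writeLine, nAtoms, atoms, ...), so those names must now come from a scope outside the hunks shown here. Keeping two hand-synchronized copies of the protocol invites drift; one private helper could serve both writers. A possible shape, with an assumed method name that is not in the source:

```cpp
#include <iosfwd>

// Hypothetical consolidation sketch (writeAtomLines is an assumed name):
class DumpWriter {
public:
  void writeDump(double currentTime);      // frame header, then shared body
  void writeFinal();                       // final header, then shared body
private:
  void writeAtomLines(std::ostream& out);  // the master/worker loop, written once
};
```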
– | Removed lines |
+ | Added lines |
< | Changed lines (original)
> | Changed lines (new)