   #ifdef IS_MPI
   #include <mpi.h>
   #include "mpiSimulation.hpp"
<  #define TAKE_THIS_TAG 0
>  #define TAKE_THIS_TAG_CHAR 1
>  #define TAKE_THIS_TAG_INT 2
>
>  namespace dWrite{
>  void nodeZeroError( void );
>  void anonymousNodeDie( void );
>  }
>
>  using namespace dWrite;
   #endif //is_mpi

   #include "ReadWrite.hpp"
   #include "simError.h"

-
-
-
-
   DumpWriter::DumpWriter( SimInfo* the_entry_plug ){

   entry_plug = the_entry_plug;
   ...
   char tempBuffer[BUFFERSIZE];
   char writeLine[BUFFERSIZE];

<  int i;
>  int i, j, which_node, done, game_over, which_atom, local_index;
   double q[4];
   DirectionalAtom* dAtom;
   int nAtoms = entry_plug->n_atoms;
   ...

   #else // is_mpi

<  int masterIndex;
<  int nodeAtomsStart;
<  int nodeAtomsEnd;
<  int mpiErr;
<  int sendError;
<  int procIndex;
<
<  MPI_Status istatus[MPI_STATUS_SIZE];
>  // first thing first, suspend fatalities.
>  painCave.isEventLoop = 1;

<
<  // write out header and node 0's coordinates
>  int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone
>  int haveError;

+  MPI_Status istatus;
+  int *AtomToProcMap = mpiSim->getAtomToProcMap();
+
+  // write out header and node 0's coordinates
+
   if( worldRank == 0 ){
   outFile << mpiSim->getTotAtoms() << "\n";
<
>
   outFile << currentTime << "\t"
   << entry_plug->box_x << "\t"
   << entry_plug->box_y << "\t"
   << entry_plug->box_z << "\n";
<
<  masterIndex = 0;
<  for( i=0; i<nAtoms; i++ ){
>  outFile.flush();
>  for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
>  // Get the Node number which has this atom;

<  sprintf( tempBuffer,
<  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
<  atoms[i]->getType(),
<  atoms[i]->getX(),
<  atoms[i]->getY(),
<  atoms[i]->getZ(),
<  atoms[i]->get_vx(),
<  atoms[i]->get_vy(),
<  atoms[i]->get_vz());
<  strcpy( writeLine, tempBuffer );
>  which_node = AtomToProcMap[i];
>
>  if (which_node == 0 ) {

<  if( atoms[i]->isDirectional() ){
>  haveError = 0;
>  which_atom = i;
>  local_index=-1;
>  for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
>  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
>  }
>  if (local_index != -1) {
>  //format the line
>  sprintf( tempBuffer,
>  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
>  atoms[local_index]->getType(),
>  atoms[local_index]->getX(),
>  atoms[local_index]->getY(),
>  atoms[local_index]->getZ(),
>  atoms[local_index]->get_vx(),
>  atoms[local_index]->get_vy(),
>  atoms[local_index]->get_vz()); // check here.
>  strcpy( writeLine, tempBuffer );

<  dAtom = (DirectionalAtom *)atoms[i];
<  dAtom->getQ( q );
<
<  sprintf( tempBuffer,
<  "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
<  q[0],
<  q[1],
<  q[2],
<  q[3],
<  dAtom->getJx(),
<  dAtom->getJy(),
<  dAtom->getJz());
<  strcat( writeLine, tempBuffer );
>  if( atoms[local_index]->isDirectional() ){
>
>  dAtom = (DirectionalAtom *)atoms[local_index];
>  dAtom->getQ( q );
>
>  sprintf( tempBuffer,
>  "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
>  q[0],
>  q[1],
>  q[2],
>  q[3],
>  dAtom->getJx(),
>  dAtom->getJy(),
>  dAtom->getJz());
>  strcat( writeLine, tempBuffer );
>
>  }
>  else
>  strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
>  }
>  else {
>  sprintf(painCave.errMsg,
>  "Atom %d not found on processor %d\n",
>  i, worldRank );
>  haveError= 1;
>  simError();
>  }
>
>  if(haveError) nodeZeroError();
>
   }
<  else
<  strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
>  else {
>  myStatus = 1;
>  MPI_Send(&myStatus, 1, MPI_INT, which_node,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
>  MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT,
>  MPI_COMM_WORLD);
>  MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
>  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
>  MPI_Recv(&myStatus, 1, MPI_INT, which_node,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

+  if(!myStatus) nodeZeroError();
+
+  }
+
   outFile << writeLine;
<  masterIndex++;
>  outFile.flush();
   }
<  outFile.flush();
<  }
>
>  // kill everyone off:
>  myStatus = -1;
>  for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
>  MPI_Send(&myStatus, 1, MPI_INT, j,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
>  }

<  sprintf( checkPointMsg,
<  "Sucessfully wrote node 0's dump configuration.\n");
<  MPIcheckPoint();
>  } else {

<  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
<  procIndex++){
>  done = 0;
>  while (!done) {
>
>  MPI_Recv(&myStatus, 1, MPI_INT, 0,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

<  if( worldRank == 0 ){
>  if(!myStatus) anonymousNodeDie();

<  mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
<
<  mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
>  if(myStatus < 0) break;
>
>  MPI_Recv(&which_atom, 1, MPI_INT, 0,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

<  // Make sure where node 0 is writing to, matches where the
<  // receiving node expects it to be.
<
<  if (masterIndex != nodeAtomsStart){
<  sendError = 1;
<  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<  sprintf(painCave.errMsg,
<  "DumpWriter error: atoms start index (%d) for "
<  "node %d not equal to master index (%d)",
<  nodeAtomsStart,procIndex,masterIndex );
<  painCave.isFatal = 1;
<  simError();
>  myStatus = 1;
>  local_index=-1;
>  for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
>  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
   }
<
<  sendError = 0;
<  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<
<  // recieve the nodes writeLines
<
<  for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
<
<  mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
<
<  outFile << writeLine;
<  masterIndex++;
<  }
<  }
<
<  else if( worldRank == procIndex ){
<
<  nodeAtomsStart = mpiSim->getMyAtomStart();
<  nodeAtomsEnd = mpiSim->getMyAtomEnd();
<
<  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<
<  sendError = -1;
<  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD, istatus);
<
<  if (sendError) MPIcheckPoint();
<
<  // send current node's configuration line by line.
<
<  for( i=0; i<nAtoms; i++ ){
<
>  if (local_index != -1) {
>  //format the line
   sprintf( tempBuffer,
   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
<  atoms[i]->getType(),
<  atoms[i]->getX(),
<  atoms[i]->getY(),
<  atoms[i]->getZ(),
<  atoms[i]->get_vx(),
<  atoms[i]->get_vy(),
<  atoms[i]->get_vz()); // check here.
>  atoms[local_index]->getType(),
>  atoms[local_index]->getX(),
>  atoms[local_index]->getY(),
>  atoms[local_index]->getZ(),
>  atoms[local_index]->get_vx(),
>  atoms[local_index]->get_vy(),
>  atoms[local_index]->get_vz()); // check here.
   strcpy( writeLine, tempBuffer );
+
+  if( atoms[local_index]->isDirectional() ){

<  if( atoms[i]->isDirectional() ){
<
<  dAtom = (DirectionalAtom *)atoms[i];
>  dAtom = (DirectionalAtom *)atoms[local_index];
   dAtom->getQ( q );
<
>
   sprintf( tempBuffer,
   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
   q[0],
   ...
   dAtom->getJz());
   strcat( writeLine, tempBuffer );
   }
<  else
>  else{
   strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
<
<  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
>  }
>  }
>  else {
>  sprintf(painCave.errMsg,
>  "Atom %d not found on processor %d\n",
>  which_atom, worldRank );
>  myStatus = 0;
>  simError();
>
>  strcpy( writeLine, "Hello, I'm an error.\n");
   }
+
+  MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
+  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
+  MPI_Send( &myStatus, 1, MPI_INT, 0,
+  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
   }
<
<  sprintf(checkPointMsg,"Node %d sent dump configuration.",
<  procIndex);
<  MPIcheckPoint();
<  }
<
>  }
>  outFile.flush();
>  sprintf( checkPointMsg,
>  "Sucessfully took a dump.\n");
>  MPIcheckPoint();
>
>  // last thing last, enable fatalities.
>  painCave.isEventLoop = 0;
>
   #endif // is_mpi
   }
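Both writeDump() above and writeFinal() below resolve the global atom index received from node 0 to a local slot by scanning atoms[] and comparing getGlobalIndex(). Below is a small self-contained sketch of that lookup with a hypothetical helper name (findLocalIndex does not exist in the source) and a stand-in Atom type that models only the one method the scan needs.

#include <cstdio>

// Stand-in for the Atom interface used in the diff.
struct Atom {
  int globalIndex;
  int getGlobalIndex() const { return globalIndex; }
};

// Hypothetical helper: the linear scan the new loops perform inline,
// returning -1 when the atom with this global index is not local.
int findLocalIndex(Atom** atoms, int nLocal, int globalIndex) {
  for (int j = 0; j < nLocal; j++)
    if (atoms[j]->getGlobalIndex() == globalIndex) return j;
  return -1;   // caller then reports "Atom %d not found on processor %d"
}

int main() {
  Atom a{7}, b{3}, c{11};
  Atom* atoms[] = { &a, &b, &c };                // local atoms, arbitrary order
  printf("%d\n", findLocalIndex(atoms, 3, 3));   // prints 1
  printf("%d\n", findLocalIndex(atoms, 3, 5));   // prints -1
  return 0;
}

The scan costs O(getMyNlocal()) per request, which is acceptable for an occasional dump; a global-to-local map built once per configuration could replace it if the lookup ever shows up in profiles.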

-
-
   void DumpWriter::writeFinal(){

+  char finalName[500];
+  ofstream finalOut;

   const int BUFFERSIZE = 2000;
<  char tempBuffer[500];
<  char writeLine[BUFFERSIZE];
<
<  char finalName[500];
>  char tempBuffer[BUFFERSIZE];
>  char writeLine[BUFFERSIZE];

-  int i;
   double q[4];
   DirectionalAtom* dAtom;
   int nAtoms = entry_plug->n_atoms;
   Atom** atoms = entry_plug->atoms;
+  int i, j, which_node, done, game_over, which_atom, local_index;

-  ofstream finalOut;

   #ifdef IS_MPI
   if(worldRank == 0 ){
   ...

   #endif //is_mpi

<
<
>
   #ifndef IS_MPI

   finalOut << nAtoms << "\n";
   ...
   finalOut << entry_plug->box_x << "\t"
   << entry_plug->box_y << "\t"
   << entry_plug->box_z << "\n";
<
>
   for( i=0; i<nAtoms; i++ ){

   sprintf( tempBuffer,
   ...
   finalOut << writeLine;
   }
   finalOut.flush();
+  finalOut.close();

   #else // is_mpi
+
+  // first thing first, suspend fatalities.
+  painCave.isEventLoop = 1;

<  int masterIndex;
<  int nodeAtomsStart;
<  int nodeAtomsEnd;
<  int mpiErr;
<  int sendError;
<  int procIndex;
<
<  MPI_Status istatus[MPI_STATUS_SIZE];
>  int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone
>  int haveError;

<
<  // write out header and node 0's coordinates
>  MPI_Status istatus;
>  int *AtomToProcMap = mpiSim->getAtomToProcMap();

+  // write out header and node 0's coordinates
+
+  haveError = 0;
   if( worldRank == 0 ){
   finalOut << mpiSim->getTotAtoms() << "\n";
<
>
   finalOut << entry_plug->box_x << "\t"
<  << entry_plug->box_y << "\t"
<  << entry_plug->box_z << "\n";
>  << entry_plug->box_y << "\t"
>  << entry_plug->box_z << "\n";

<  masterIndex = 0;
<
<  for( i=0; i<nAtoms; i++ ){
>  for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) {
>  // Get the Node number which has this molecule:

<  sprintf( tempBuffer,
<  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
<  atoms[i]->getType(),
<  atoms[i]->getX(),
<  atoms[i]->getY(),
<  atoms[i]->getZ(),
<  atoms[i]->get_vx(),
<  atoms[i]->get_vy(),
<  atoms[i]->get_vz());
<  strcpy( writeLine, tempBuffer );
<
<  if( atoms[i]->isDirectional() ){
>  which_node = AtomToProcMap[i];
>
>  if (which_node == mpiSim->getMyNode()) {
>
>  which_atom = i;
>  local_index=-1;
>  for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) {
>  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
>  }
>  if (local_index != -1) {
>  sprintf( tempBuffer,
>  "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
>  atoms[local_index]->getType(),
>  atoms[local_index]->getX(),
>  atoms[local_index]->getY(),
>  atoms[local_index]->getZ(),
>  atoms[local_index]->get_vx(),
>  atoms[local_index]->get_vy(),
>  atoms[local_index]->get_vz());
>  strcpy( writeLine, tempBuffer );

<  dAtom = (DirectionalAtom *)atoms[i];
<  dAtom->getQ( q );
<
<  sprintf( tempBuffer,
<  "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
<  q[0],
<  q[1],
<  q[2],
<  q[3],
<  dAtom->getJx(),
<  dAtom->getJy(),
<  dAtom->getJz());
<  strcat( writeLine, tempBuffer );
<  }
<  else
<  strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
>  if( atoms[local_index]->isDirectional() ){
>
>  dAtom = (DirectionalAtom *)atoms[local_index];
>  dAtom->getQ( q );
>
>  sprintf( tempBuffer,
>  "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
>  q[0],
>  q[1],
>  q[2],
>  q[3],
>  dAtom->getJx(),
>  dAtom->getJy(),
>  dAtom->getJz());
>  strcat( writeLine, tempBuffer );
>  }
>  else
>  strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
>  }
>  else {
>  sprintf(painCave.errMsg,
>  "Atom %d not found on processor %d\n",
>  i, worldRank );
>  haveError= 1;
>  simError();
>  }
>
>  if(haveError) nodeZeroError();
>
>  }
>  else {
>
>  myStatus = 1;
>  MPI_Send(&myStatus, 1, MPI_INT, which_node,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
>  MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT,
>  MPI_COMM_WORLD);
>  MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node,
>  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus);
>  MPI_Recv(&myStatus, 1, MPI_INT, which_node,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);

+  if(!myStatus) nodeZeroError();
+  }
+
   finalOut << writeLine;
-  masterIndex++;
   }
-  finalOut.flush();
-  }

<  for (procIndex = 1; procIndex < mpiSim->getNumberProcessors();
<  procIndex++){
>  // kill everyone off:
>  myStatus = -1;
>  for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
>  MPI_Send(&myStatus, 1, MPI_INT, j,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
>  }

<  if( worldRank == 0 ){
<
<  mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD,istatus);
>  } else {
>
>  done = 0;
>  while (!done) {

<  mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD, istatus);
<
<  // Make sure where node 0 is writing to, matches where the
<  // receiving node expects it to be.
<
<  if (masterIndex != nodeAtomsStart){
<  sendError = 1;
<  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<  sprintf(painCave.errMsg,
<  "DumpWriter error: atoms start index (%d) for "
<  "node %d not equal to master index (%d)",
<  nodeAtomsStart,procIndex,masterIndex );
<  painCave.isFatal = 1;
<  simError();
>  MPI_Recv(&myStatus, 1, MPI_INT, 0,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
>
>  if(!myStatus) anonymousNodeDie();
>
>  if(myStatus < 0) break;
>
>  MPI_Recv(&which_atom, 1, MPI_INT, 0,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus);
>
>  myStatus = 1;
>  local_index=-1;
>  for (j=0; j < mpiSim->getMyNlocal(); j++) {
>  if (atoms[j]->getGlobalIndex() == which_atom) local_index = j;
   }
<
<  sendError = 0;
<  mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
>  if (local_index != -1) {

<  // recieve the nodes writeLines
<
<  for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){
<
<  mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex,
<  TAKE_THIS_TAG,MPI_COMM_WORLD,istatus );
<
<  finalOut << writeLine;
<  masterIndex++;
<  }
<
<  finalOut.flush();
<  }
<
<  else if( worldRank == procIndex ){
<
<  nodeAtomsStart = mpiSim->getMyAtomStart();
<  nodeAtomsEnd = mpiSim->getMyAtomEnd();
<
<  mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<  mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
<
<  mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD, istatus);
<  if (sendError) MPIcheckPoint();
<
<  // send current node's configuration line by line.
<
<  for( i=0; i<nAtoms; i++ ){
<
>  //format the line
   sprintf( tempBuffer,
   "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t",
<  atoms[i]->getType(),
<  atoms[i]->getX(),
<  atoms[i]->getY(),
<  atoms[i]->getZ(),
<  atoms[i]->get_vx(),
<  atoms[i]->get_vy(),
<  atoms[i]->get_vz());
>  atoms[local_index]->getType(),
>  atoms[local_index]->getX(),
>  atoms[local_index]->getY(),
>  atoms[local_index]->getZ(),
>  atoms[local_index]->get_vx(),
>  atoms[local_index]->get_vy(),
>  atoms[local_index]->get_vz()); // check here.
   strcpy( writeLine, tempBuffer );
+
+  if( atoms[local_index]->isDirectional() ){

<  if( atoms[i]->isDirectional() ){
<
<  dAtom = (DirectionalAtom *)atoms[i];
>  dAtom = (DirectionalAtom *)atoms[local_index];
   dAtom->getQ( q );
<
>
   sprintf( tempBuffer,
   "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n",
   q[0],
   ...
   dAtom->getJz());
   strcat( writeLine, tempBuffer );
   }
<  else
>  else{
   strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" );
<
<  mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG,
<  MPI_COMM_WORLD);
>  }
>  }
>  else {
>  sprintf(painCave.errMsg,
>  "Atom %d not found on processor %d\n",
>  which_atom, worldRank );
>  myStatus = 0;
>  simError();
>
>  strcpy( writeLine, "Hello, I'm an error.\n");
   }
+
+  MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0,
+  TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD);
+  MPI_Send( &myStatus, 1, MPI_INT, 0,
+  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
   }
-
-  sprintf(checkPointMsg,"Node %d sent dump configuration.",
-  procIndex);
-  MPIcheckPoint();
   }
+  finalOut.flush();
+  sprintf( checkPointMsg,
+  "Sucessfully took a dump.\n");
+  MPIcheckPoint();
+
+  if( worldRank == 0 ) finalOut.close();
+  #endif // is_mpi
+  }

-  if( worldRank == 0 ) finalOut.close();

<
<  #endif // is_mpi
>
>  #ifdef IS_MPI
>
>  // a couple of functions to let us escape the write loop
>
>  void dWrite::nodeZeroError( void ){
>  int j, myStatus;
>
>  myStatus = 0;
>  for (j = 0; j < mpiSim->getNumberProcessors(); j++) {
>  MPI_Send( &myStatus, 1, MPI_INT, j,
>  TAKE_THIS_TAG_INT, MPI_COMM_WORLD);
>  }
>
>
>  MPI_Finalize();
>  exit (0);
>
   }
+
+  void dWrite::anonymousNodeDie( void ){
+
+  MPI_Finalize();
+  exit (0);
+  }
+
+  #endif //is_mpi
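The status values passed around above are bare literals (1 = wakeup and success, 0 = error, -1 = all done, per the comment on myStatus), and nodeZeroError() works by broadcasting the error code to every rank so that no worker is left blocked in MPI_Recv before MPI_Finalize is called. Below is a purely illustrative sketch, not part of the source, that names those codes and shows the worker-side reaction to each; the enum names and the react helper are invented for the example.

#include <cstdio>

// Hypothetical names for the status codes used as bare 1 / 0 / -1 above.
enum DumpStatus { DUMP_ERROR = 0, DUMP_WAKEUP = 1, DUMP_ALLDONE = -1 };

// The decision a worker makes after each status MPI_Recv; the two escape
// paths stand in for anonymousNodeDie() and the break out of the loop.
const char* react(int status) {
  if (status == DUMP_ERROR)   return "finalize and exit (anonymousNodeDie)";
  if (status == DUMP_ALLDONE) return "leave the receive loop";
  return "receive an atom index, format a line, send it back with status 1";
}

int main() {
  const int incoming[] = { DUMP_WAKEUP, DUMP_WAKEUP, DUMP_ALLDONE };
  for (int s : incoming) printf("status %2d -> %s\n", s, react(s));
  return 0;
}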