13 |
|
|
14 |
|
#ifdef IS_MPI |
15 |
|
#include <mpi.h> |
16 |
- |
#include <mpi++.h> |
16 |
|
#include "mpiSimulation.hpp" |
17 |
< |
#define TAKE_THIS_TAG 0 |
17 |
> |
#define TAKE_THIS_TAG_CHAR 0 |
18 |
> |
#define TAKE_THIS_TAG_INT 1 |
19 |
> |
|
20 |
> |
namespace initFile{ |
21 |
> |
void nodeZeroError( void ); |
22 |
> |
void anonymousNodeDie( void ); |
23 |
> |
} |
24 |
> |
|
25 |
> |
using namespace initFile; |
26 |
> |
|
27 |
|
#endif // is_mpi |
28 |
|
|
29 |
|
InitializeFromFile :: InitializeFromFile( char *in_name ){ |
147 |
|
// MPI Section of code.......... |
148 |
|
#else //IS_MPI |
149 |
|
|
150 |
< |
MPI::Status istatus; |
150 |
> |
// first thing first, suspend fatalities. |
151 |
> |
painCave.isEventLoop = 1; |
152 |
> |
|
153 |
> |
int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone |
154 |
> |
int haveError; |
155 |
> |
|
156 |
> |
MPI_Status istatus; |
157 |
|
int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
158 |
|
|
159 |
+ |
|
160 |
+ |
haveError = 0; |
161 |
|
if (worldRank == 0) { |
162 |
+ |
|
163 |
|
eof_test = fgets(read_buffer, sizeof(read_buffer), c_in_file); |
164 |
|
if( eof_test == NULL ){ |
165 |
|
sprintf( painCave.errMsg, |
166 |
|
"Error reading 1st line of %s \n ",c_in_name); |
167 |
< |
painCave.isFatal = 1; |
167 |
> |
haveError = 1; |
168 |
|
simError(); |
169 |
|
} |
170 |
|
|
181 |
|
"Initialize from File error. %s n_atoms, %d, " |
182 |
|
"does not match the BASS file's n_atoms, %d.\n", |
183 |
|
c_in_name, n_atoms, entry_plug->n_atoms ); |
184 |
< |
painCave.isFatal = 1; |
184 |
> |
haveError= 1; |
185 |
|
simError(); |
186 |
|
} |
187 |
|
|
191 |
|
if(eof_test == NULL){ |
192 |
|
sprintf( painCave.errMsg, |
193 |
|
"error in reading comment in %s\n", c_in_name); |
194 |
< |
painCave.isFatal = 1; |
194 |
> |
haveError= 1; |
195 |
|
simError(); |
196 |
|
} |
197 |
|
|
198 |
+ |
if(haveError) nodeZeroError(); |
199 |
|
|
200 |
|
for (i=0 ; i < mpiSim->getTotAtoms(); i++) { |
201 |
|
|
206 |
|
"natoms = %d; index = %d\n" |
207 |
|
"error reading the line from the file.\n", |
208 |
|
c_in_name, n_atoms, i ); |
209 |
< |
painCave.isFatal = 1; |
209 |
> |
haveError= 1; |
210 |
|
simError(); |
211 |
|
} |
212 |
|
|
213 |
+ |
if(haveError) nodeZeroError(); |
214 |
+ |
|
215 |
|
// Get the Node number which wants this atom: |
216 |
|
which_node = AtomToProcMap[i]; |
217 |
< |
if (which_node == mpiSim->getMyNode()) { |
217 |
> |
if (which_node == 0) { |
218 |
|
parseErr = parseDumpLine( read_buffer, i ); |
219 |
|
if( parseErr != NULL ){ |
220 |
|
strcpy( painCave.errMsg, parseErr ); |
221 |
< |
painCave.isFatal = 1; |
221 |
> |
haveError = 1; |
222 |
|
simError(); |
223 |
|
} |
224 |
< |
} else { |
225 |
< |
MPI::COMM_WORLD.Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node, |
226 |
< |
TAKE_THIS_TAG); |
227 |
< |
MPI::COMM_WORLD.Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG); |
224 |
> |
if(haveError) nodeZeroError(); |
225 |
> |
} |
226 |
> |
|
227 |
> |
else { |
228 |
> |
|
229 |
> |
myStatus = 1; |
230 |
> |
MPI_Send(&myStatus, 1, MPI_INT, which_node, |
231 |
> |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
232 |
> |
MPI_Send(read_buffer, BUFFERSIZE, MPI_CHAR, which_node, |
233 |
> |
TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); |
234 |
> |
MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
235 |
> |
MPI_COMM_WORLD); |
236 |
> |
MPI_Recv(&myStatus, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
237 |
> |
MPI_COMM_WORLD, &istatus); |
238 |
> |
|
239 |
> |
if(!myStatus) nodeZeroError(); |
240 |
|
} |
241 |
|
} |
242 |
< |
sprintf(read_buffer, "GAMEOVER"); |
242 |
> |
myStatus = -1; |
243 |
|
for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
244 |
< |
MPI::COMM_WORLD.Send(read_buffer, BUFFERSIZE, MPI_CHAR, j, |
245 |
< |
TAKE_THIS_TAG); |
244 |
> |
MPI_Send( &myStatus, 1, MPI_INT, j, |
245 |
> |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
246 |
|
} |
247 |
|
|
248 |
|
} else { |
249 |
|
|
250 |
|
done = 0; |
251 |
|
while (!done) { |
252 |
< |
MPI::COMM_WORLD.Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0, |
253 |
< |
TAKE_THIS_TAG, istatus); |
254 |
< |
if (strcmp(read_buffer, "GAMEOVER")) { |
255 |
< |
done = 1; |
256 |
< |
continue; |
257 |
< |
} else { |
258 |
< |
MPI::COMM_WORLD.Recv(&which_atom, 1, MPI_INT, 0, |
259 |
< |
TAKE_THIS_TAG, istatus); |
260 |
< |
|
261 |
< |
parseErr = parseDumpLine( read_buffer, which_atom ); |
262 |
< |
if( parseErr != NULL ){ |
263 |
< |
strcpy( painCave.errMsg, parseErr ); |
264 |
< |
painCave.isFatal = 1; |
265 |
< |
simError(); |
266 |
< |
} |
252 |
> |
|
253 |
> |
MPI_Recv(&myStatus, 1, MPI_INT, 0, |
254 |
> |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
255 |
> |
|
256 |
> |
if(!myStatus) anonymousNodeDie(); |
257 |
> |
|
258 |
> |
if(myStatus < 0) break; |
259 |
> |
|
260 |
> |
MPI_Recv(read_buffer, BUFFERSIZE, MPI_CHAR, 0, |
261 |
> |
TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); |
262 |
> |
MPI_Recv(&which_atom, 1, MPI_INT, 0, |
263 |
> |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
264 |
> |
|
265 |
> |
myStatus = 1; |
266 |
> |
parseErr = parseDumpLine( read_buffer, which_atom ); |
267 |
> |
if( parseErr != NULL ){ |
268 |
> |
strcpy( painCave.errMsg, parseErr ); |
269 |
> |
myStatus = 0; |
270 |
> |
simError(); |
271 |
|
} |
272 |
+ |
|
273 |
+ |
MPI_Send( &myStatus, 1, MPI_INT, 0, |
274 |
+ |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
275 |
+ |
|
276 |
|
} |
277 |
|
} |
278 |
< |
|
278 |
> |
|
279 |
> |
// last thing last, enable fatalities. |
280 |
> |
painCave.isEventLoop = 0; |
281 |
> |
|
282 |
|
#endif |
283 |
|
} |
284 |
|
|
505 |
|
|
506 |
|
return NULL; |
507 |
|
} |
508 |
+ |
|
509 |
+ |
|
510 |
+ |
#ifdef IS_MPI |
511 |
+ |
|
512 |
+ |
// a couple of functions to let us escape the read loop |
513 |
+ |
|
514 |
+ |
void initFile::nodeZeroError( void ){ |
515 |
+ |
int j, myStatus; |
516 |
+ |
|
517 |
+ |
myStatus = 0; |
518 |
+ |
for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
519 |
+ |
MPI_Send( &myStatus, 1, MPI_INT, j, |
520 |
+ |
TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
521 |
+ |
} |
522 |
+ |
|
523 |
+ |
|
524 |
+ |
MPI_Finalize(); |
525 |
+ |
exit (0); |
526 |
+ |
|
527 |
+ |
} |
528 |
+ |
|
529 |
+ |
void initFile::anonymousNodeDie( void ){ |
530 |
+ |
|
531 |
+ |
MPI_Finalize(); |
532 |
+ |
exit (0); |
533 |
+ |
} |
534 |
+ |
|
535 |
+ |
#endif //is_mpi |