# | Line 1 | Line 1 | |
---|---|---|
1 | + | #define _FILE_OFFSET_BITS 64 |
2 | + | |
3 | #include <cstring> | |
4 | #include <iostream> | |
5 | #include <fstream> | |
# | Line 5 | Line 7 | |
7 | #ifdef IS_MPI | |
8 | #include <mpi.h> | |
9 | #include "mpiSimulation.hpp" | |
10 | < | #define TAKE_THIS_TAG 0 |
10 | > | #define TAKE_THIS_TAG_CHAR 1 |
11 | > | #define TAKE_THIS_TAG_INT 2 |
12 | > | |
13 | > | namespace dWrite{ |
14 | > | void nodeZeroError( void ); |
15 | > | void anonymousNodeDie( void ); |
16 | > | } |
17 | > | |
18 | > | using namespace dWrite; |
19 | #endif //is_mpi | |
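// Editorial note on the tag/status handshake used below (inferred from the
// writeDump()/writeFinal() bodies in this revision): node 0 asks the owner of
// each non-local atom for its line by sending an int status on
// TAKE_THIS_TAG_INT (1 = "send the atom whose global index follows",
// -1 = "all frames done", 0 = "abort"), followed by the global index; the
// owning node replies with the formatted line on TAKE_THIS_TAG_CHAR and its
// own status on TAKE_THIS_TAG_INT. nodeZeroError() and anonymousNodeDie()
// implement the abort path on the master and worker sides, respectively.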
20 | ||
21 | #include "ReadWrite.hpp" | |
22 | #include "simError.h" | |
23 | ||
14 | – | |
15 | – | |
16 | – | |
17 | – | |
24 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
25 | ||
26 | entry_plug = the_entry_plug; | |
# | Line 23 | Line 29 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
29 | if(worldRank == 0 ){ | |
30 | #endif // is_mpi | |
31 | ||
26 | – | |
27 | – | |
32 | strcpy( outName, entry_plug->sampleName ); | |
33 | ||
34 | outFile.open(outName, ios::out | ios::trunc ); | |
# | Line 37 | Line 41 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
41 | painCave.isFatal = 1; | |
42 | simError(); | |
43 | } | |
44 | < | |
44 | > | |
45 | //outFile.setf( ios::scientific ); | |
46 | ||
47 | #ifdef IS_MPI | |
# | Line 68 | Line 72 | void DumpWriter::writeDump( double currentTime ){ | |
72 | char tempBuffer[BUFFERSIZE]; | |
73 | char writeLine[BUFFERSIZE]; | |
74 | ||
75 | < | int i; |
75 | > | int i, j, which_node, done, which_atom, local_index; |
76 | double q[4]; | |
77 | DirectionalAtom* dAtom; | |
78 | int nAtoms = entry_plug->n_atoms; | |
79 | Atom** atoms = entry_plug->atoms; | |
80 | + | |
81 | + | double pos[3], vel[3]; |
82 | ||
83 | ||
84 | #ifndef IS_MPI | |
85 | ||
86 | outFile << nAtoms << "\n"; | |
87 | ||
88 | < | outFile << currentTime << "\t" |
89 | < | << entry_plug->box_x << "\t" |
90 | < | << entry_plug->box_y << "\t" |
91 | < | << entry_plug->box_z << "\n"; |
88 | > | outFile << currentTime << ";\t" |
89 | > | << entry_plug->Hmat[0][0] << "\t" |
90 | > | << entry_plug->Hmat[1][0] << "\t" |
91 | > | << entry_plug->Hmat[2][0] << ";\t" |
92 | > | |
93 | > | << entry_plug->Hmat[0][1] << "\t" |
94 | > | << entry_plug->Hmat[1][1] << "\t" |
95 | > | << entry_plug->Hmat[2][1] << ";\t" |
96 | > | |
97 | > | << entry_plug->Hmat[0][2] << "\t" |
98 | > | << entry_plug->Hmat[1][2] << "\t" |
99 | > | << entry_plug->Hmat[2][2] << ";\n"; |
100 | ||
101 | for( i=0; i<nAtoms; i++ ){ | |
102 | ||
103 | + | atoms[i]->getPos(pos); |
104 | + | atoms[i]->getVel(vel); |
105 | ||
106 | sprintf( tempBuffer, | |
107 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
108 | atoms[i]->getType(), | |
109 | < | atoms[i]->getX(), |
110 | < | atoms[i]->getY(), |
111 | < | atoms[i]->getZ(), |
112 | < | atoms[i]->get_vx(), |
113 | < | atoms[i]->get_vy(), |
114 | < | atoms[i]->get_vz()); |
109 | > | pos[0], |
110 | > | pos[1], |
111 | > | pos[2], |
112 | > | vel[0], |
113 | > | vel[1], |
114 | > | vel[2]); |
115 | strcpy( writeLine, tempBuffer ); | |
116 | ||
117 | if( atoms[i]->isDirectional() ){ | |
# | Line 123 | Line 139 | void DumpWriter::writeDump( double currentTime ){ | |
139 | ||
140 | #else // is_mpi | |
141 | ||
142 | < | int masterIndex; |
143 | < | int nodeAtomsStart; |
128 | < | int nodeAtomsEnd; |
129 | < | int mpiErr; |
130 | < | int sendError; |
131 | < | int procIndex; |
132 | < | |
133 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
142 | > | // first thing first, suspend fatalities. |
143 | > | painCave.isEventLoop = 1; |
144 | ||
145 | < | |
146 | < | // write out header and node 0's coordinates |
145 | > | int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone |
146 | > | int haveError; |
147 | ||
148 | + | MPI_Status istatus; |
149 | + | int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
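// AtomToProcMap takes a global atom index to the rank that currently owns
// that atom, so node 0 can request lines in global order and keep the output
// file ordered no matter which processor holds each atom.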
150 | + | |
151 | + | // write out header and node 0's coordinates |
152 | + | |
153 | if( worldRank == 0 ){ | |
154 | outFile << mpiSim->getTotAtoms() << "\n"; | |
155 | + | |
156 | + | outFile << currentTime << ";\t" |
157 | + | << entry_plug->Hmat[0][0] << "\t" |
158 | + | << entry_plug->Hmat[1][0] << "\t" |
159 | + | << entry_plug->Hmat[2][0] << ";\t" |
160 | ||
161 | < | outFile << currentTime << "\t" |
162 | < | << entry_plug->box_x << "\t" |
163 | < | << entry_plug->box_y << "\t" |
144 | < | << entry_plug->box_z << "\n"; |
145 | < | |
146 | < | masterIndex = 0; |
147 | < | for( i=0; i<nAtoms; i++ ){ |
161 | > | << entry_plug->Hmat[0][1] << "\t" |
162 | > | << entry_plug->Hmat[1][1] << "\t" |
163 | > | << entry_plug->Hmat[2][1] << ";\t" |
164 | ||
165 | < | sprintf( tempBuffer, |
166 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
167 | < | atoms[i]->getType(), |
168 | < | atoms[i]->getX(), |
169 | < | atoms[i]->getY(), |
170 | < | atoms[i]->getZ(), |
171 | < | atoms[i]->get_vx(), |
172 | < | atoms[i]->get_vy(), |
173 | < | atoms[i]->get_vz()); |
174 | < | strcpy( writeLine, tempBuffer ); |
165 | > | << entry_plug->Hmat[0][2] << "\t" |
166 | > | << entry_plug->Hmat[1][2] << "\t" |
167 | > | << entry_plug->Hmat[2][2] << ";\n"; |
168 | > | |
169 | > | outFile.flush(); |
170 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
171 | > | // Get the Node number which has this atom; |
172 | > | |
173 | > | which_node = AtomToProcMap[i]; |
174 | > | |
175 | > | if (which_node == 0 ) { |
176 | ||
177 | < | if( atoms[i]->isDirectional() ){ |
177 | > | haveError = 0; |
178 | > | which_atom = i; |
179 | > | local_index=-1; |
180 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
181 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
182 | > | } |
183 | > | if (local_index != -1) { |
184 | > | //format the line |
185 | ||
186 | < | dAtom = (DirectionalAtom *)atoms[i]; |
187 | < | dAtom->getQ( q ); |
186 | > | atoms[local_index]->getPos(pos); |
187 | > | atoms[local_index]->getVel(vel); |
188 | > | |
189 | > | sprintf( tempBuffer, |
190 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
191 | > | atoms[local_index]->getType(), |
192 | > | pos[0], |
193 | > | pos[1], |
194 | > | pos[2], |
195 | > | vel[0], |
196 | > | vel[1], |
197 | > | vel[2]); // check here. |
198 | > | strcpy( writeLine, tempBuffer ); |
199 | ||
200 | < | sprintf( tempBuffer, |
201 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
202 | < | q[0], |
203 | < | q[1], |
204 | < | q[2], |
205 | < | q[3], |
206 | < | dAtom->getJx(), |
207 | < | dAtom->getJy(), |
208 | < | dAtom->getJz()); |
209 | < | strcat( writeLine, tempBuffer ); |
200 | > | if( atoms[local_index]->isDirectional() ){ |
201 | > | |
202 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
203 | > | dAtom->getQ( q ); |
204 | > | |
205 | > | sprintf( tempBuffer, |
206 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
207 | > | q[0], |
208 | > | q[1], |
209 | > | q[2], |
210 | > | q[3], |
211 | > | dAtom->getJx(), |
212 | > | dAtom->getJy(), |
213 | > | dAtom->getJz()); |
214 | > | strcat( writeLine, tempBuffer ); |
215 | > | |
216 | > | } |
217 | > | else |
218 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
219 | > | } |
220 | > | else { |
221 | > | sprintf(painCave.errMsg, |
222 | > | "Atom %d not found on processor %d\n", |
223 | > | i, worldRank ); |
224 | > | haveError= 1; |
225 | > | simError(); |
226 | > | } |
227 | > | |
228 | > | if(haveError) nodeZeroError(); |
229 | > | |
230 | } | |
231 | < | else |
232 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
231 | > | else { |
232 | > | myStatus = 1; |
233 | > | MPI_Send(&myStatus, 1, MPI_INT, which_node, |
234 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
235 | > | MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
236 | > | MPI_COMM_WORLD); |
237 | > | MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
238 | > | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); |
239 | > | MPI_Recv(&myStatus, 1, MPI_INT, which_node, |
240 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
241 | ||
242 | + | if(!myStatus) nodeZeroError(); |
243 | + | |
244 | + | } |
245 | + | |
246 | outFile << writeLine; | |
247 | < | masterIndex++; |
247 | > | outFile.flush(); |
248 | } | |
249 | < | outFile.flush(); |
250 | < | } |
249 | > | |
250 | > | // kill everyone off: |
251 | > | myStatus = -1; |
252 | > | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
253 | > | MPI_Send(&myStatus, 1, MPI_INT, j, |
254 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
255 | > | } |
256 | ||
257 | < | sprintf( checkPointMsg, |
186 | < | "Sucessfully wrote node 0's dump configuration.\n"); |
187 | < | MPIcheckPoint(); |
257 | > | } else { |
258 | ||
259 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
260 | < | procIndex++){ |
259 | > | done = 0; |
260 | > | while (!done) { |
261 | > | |
262 | > | MPI_Recv(&myStatus, 1, MPI_INT, 0, |
263 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
264 | ||
265 | < | if( worldRank == 0 ){ |
265 | > | if(!myStatus) anonymousNodeDie(); |
266 | ||
267 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
268 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
267 | > | if(myStatus < 0) break; |
268 | > | |
269 | > | MPI_Recv(&which_atom, 1, MPI_INT, 0, |
270 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
271 | ||
272 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
273 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
274 | < | |
275 | < | // Make sure where node 0 is writing to, matches where the |
201 | < | // receiving node expects it to be. |
202 | < | |
203 | < | if (masterIndex != nodeAtomsStart){ |
204 | < | sendError = 1; |
205 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
206 | < | MPI_COMM_WORLD); |
207 | < | sprintf(painCave.errMsg, |
208 | < | "DumpWriter error: atoms start index (%d) for " |
209 | < | "node %d not equal to master index (%d)", |
210 | < | nodeAtomsStart,procIndex,masterIndex ); |
211 | < | painCave.isFatal = 1; |
212 | < | simError(); |
272 | > | myStatus = 1; |
273 | > | local_index=-1; |
274 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
275 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
276 | } | |
277 | < | |
278 | < | sendError = 0; |
216 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
217 | < | MPI_COMM_WORLD); |
218 | < | |
219 | < | // recieve the nodes writeLines |
220 | < | |
221 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
222 | < | |
223 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
224 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
225 | < | |
226 | < | outFile << writeLine; |
227 | < | masterIndex++; |
228 | < | } |
229 | < | } |
277 | > | if (local_index != -1) { |
278 | > | //format the line |
279 | ||
280 | < | else if( worldRank == procIndex ){ |
280 | > | atoms[local_index]->getPos(pos); |
281 | > | atoms[local_index]->getVel(vel); |
282 | ||
233 | – | nodeAtomsStart = mpiSim->getMyAtomStart(); |
234 | – | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
235 | – | |
236 | – | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
237 | – | MPI_COMM_WORLD); |
238 | – | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
239 | – | MPI_COMM_WORLD); |
240 | – | |
241 | – | sendError = -1; |
242 | – | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
243 | – | MPI_COMM_WORLD, istatus); |
244 | – | |
245 | – | if (sendError) MPIcheckPoint(); |
246 | – | |
247 | – | // send current node's configuration line by line. |
248 | – | |
249 | – | for( i=0; i<nAtoms; i++ ){ |
250 | – | |
283 | sprintf( tempBuffer, | |
284 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
285 | < | atoms[i]->getType(), |
286 | < | atoms[i]->getX(), |
287 | < | atoms[i]->getY(), |
288 | < | atoms[i]->getZ(), |
289 | < | atoms[i]->get_vx(), |
290 | < | atoms[i]->get_vy(), |
291 | < | atoms[i]->get_vz()); // check here. |
285 | > | atoms[local_index]->getType(), |
286 | > | pos[0], |
287 | > | pos[1], |
288 | > | pos[2], |
289 | > | vel[0], |
290 | > | vel[1], |
291 | > | vel[2]); // check here. |
292 | strcpy( writeLine, tempBuffer ); | |
293 | + | |
294 | + | if( atoms[local_index]->isDirectional() ){ |
295 | ||
296 | < | if( atoms[i]->isDirectional() ){ |
263 | < | |
264 | < | dAtom = (DirectionalAtom *)atoms[i]; |
296 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
297 | dAtom->getQ( q ); | |
298 | < | |
298 | > | |
299 | sprintf( tempBuffer, | |
300 | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", | |
301 | q[0], | |
# | Line 275 | Line 307 | void DumpWriter::writeDump( double currentTime ){ | |
307 | dAtom->getJz()); | |
308 | strcat( writeLine, tempBuffer ); | |
309 | } | |
310 | < | else |
310 | > | else{ |
311 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
312 | < | |
313 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
314 | < | MPI_COMM_WORLD); |
312 | > | } |
313 | > | } |
314 | > | else { |
315 | > | sprintf(painCave.errMsg, |
316 | > | "Atom %d not found on processor %d\n", |
317 | > | which_atom, worldRank ); |
318 | > | myStatus = 0; |
319 | > | simError(); |
320 | > | |
321 | > | strcpy( writeLine, "Hello, I'm an error.\n"); |
322 | } | |
323 | + | |
324 | + | MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
325 | + | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); |
326 | + | MPI_Send( &myStatus, 1, MPI_INT, 0, |
327 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
328 | } | |
329 | < | |
330 | < | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
331 | < | procIndex); |
332 | < | MPIcheckPoint(); |
333 | < | } |
334 | < | |
329 | > | } |
330 | > | outFile.flush(); |
331 | > | sprintf( checkPointMsg, |
332 | > | "Sucessfully took a dump.\n"); |
333 | > | MPIcheckPoint(); |
334 | > | |
335 | > | // last thing last, enable fatalities. |
336 | > | painCave.isEventLoop = 0; |
337 | > | |
338 | #endif // is_mpi | |
339 | } | |
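// Frame layout produced by writeDump() (editorial sketch; fields are
// tab-separated, box-vector groups end in semicolons):
//
//   <nAtoms>
//   <time>;\t<Hxx>\t<Hyx>\t<Hzx>;\t<Hxy>\t<Hyy>\t<Hzy>;\t<Hxz>\t<Hyz>\t<Hzz>;
//   <type>\t<x>\t<y>\t<z>\t<vx>\t<vy>\t<vz>\t<q0>\t<q1>\t<q2>\t<q3>\t<jx>\t<jy>\t<jz>
//
// One such line per atom; non-directional atoms get seven literal "0.0"
// fields in place of the quaternion and angular momentum.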
340 | ||
341 | + | void DumpWriter::writeFinal(double finalTime){ |
342 | ||
343 | + | char finalName[500]; |
344 | + | ofstream finalOut; |
345 | ||
296 | – | void DumpWriter::writeFinal(){ |
297 | – | |
298 | – | |
346 | const int BUFFERSIZE = 2000; | |
347 | < | char tempBuffer[500]; |
348 | < | char writeLine[BUFFERSIZE]; |
302 | < | |
303 | < | char finalName[500]; |
347 | > | char tempBuffer[BUFFERSIZE]; |
348 | > | char writeLine[BUFFERSIZE]; |
349 | ||
305 | – | int i; |
350 | double q[4]; | |
351 | DirectionalAtom* dAtom; | |
352 | int nAtoms = entry_plug->n_atoms; | |
353 | Atom** atoms = entry_plug->atoms; | |
354 | + | int i, j, which_node, done, game_over, which_atom, local_index; |
355 | ||
356 | < | ofstream finalOut; |
356 | > | double pos[3], vel[3]; |
357 | ||
358 | #ifdef IS_MPI | |
359 | if(worldRank == 0 ){ | |
# | Line 335 | Line 380 | void DumpWriter::writeFinal(){ | |
380 | ||
381 | #endif //is_mpi | |
382 | ||
383 | < | |
339 | < | |
383 | > | |
384 | #ifndef IS_MPI | |
385 | ||
386 | finalOut << nAtoms << "\n"; | |
387 | ||
388 | < | finalOut << entry_plug->box_x << "\t" |
389 | < | << entry_plug->box_y << "\t" |
390 | < | << entry_plug->box_z << "\n"; |
388 | > | finalOut << finalTime << ";\t" |
389 | > | << entry_plug->Hmat[0][0] << "\t" |
390 | > | << entry_plug->Hmat[1][0] << "\t" |
391 | > | << entry_plug->Hmat[2][0] << ";\t" |
392 | ||
393 | + | << entry_plug->Hmat[0][1] << "\t" |
394 | + | << entry_plug->Hmat[1][1] << "\t" |
395 | + | << entry_plug->Hmat[2][1] << ";\t" |
396 | + | |
397 | + | << entry_plug->Hmat[0][2] << "\t" |
398 | + | << entry_plug->Hmat[1][2] << "\t" |
399 | + | << entry_plug->Hmat[2][2] << ";\n"; |
400 | + | |
401 | for( i=0; i<nAtoms; i++ ){ | |
402 | ||
403 | + | atoms[i]->getPos(pos); |
404 | + | atoms[i]->getVel(vel); |
405 | + | |
406 | sprintf( tempBuffer, | |
407 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
408 | atoms[i]->getType(), | |
409 | < | atoms[i]->getX(), |
410 | < | atoms[i]->getY(), |
411 | < | atoms[i]->getZ(), |
412 | < | atoms[i]->get_vx(), |
413 | < | atoms[i]->get_vy(), |
414 | < | atoms[i]->get_vz()); |
409 | > | pos[0], |
410 | > | pos[1], |
411 | > | pos[2], |
412 | > | vel[0], |
413 | > | vel[1], |
414 | > | vel[2]); |
415 | strcpy( writeLine, tempBuffer ); | |
416 | ||
417 | if( atoms[i]->isDirectional() ){ | |
# | Line 380 | Line 436 | void DumpWriter::writeFinal(){ | |
436 | finalOut << writeLine; | |
437 | } | |
438 | finalOut.flush(); | |
439 | + | finalOut.close(); |
440 | ||
441 | #else // is_mpi | |
442 | + | |
443 | + | // first thing first, suspend fatalities. |
444 | + | painCave.isEventLoop = 1; |
445 | ||
446 | < | int masterIndex; |
447 | < | int nodeAtomsStart; |
388 | < | int nodeAtomsEnd; |
389 | < | int mpiErr; |
390 | < | int sendError; |
391 | < | int procIndex; |
392 | < | |
393 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
446 | > | int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone |
447 | > | int haveError; |
448 | ||
449 | < | |
450 | < | // write out header and node 0's coordinates |
449 | > | MPI_Status istatus; |
450 | > | int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
451 | ||
452 | + | // write out header and node 0's coordinates |
453 | + | |
454 | + | haveError = 0; |
455 | if( worldRank == 0 ){ | |
456 | finalOut << mpiSim->getTotAtoms() << "\n"; | |
457 | + | |
458 | + | finalOut << finalTime << ";\t" |
459 | + | << entry_plug->Hmat[0][0] << "\t" |
460 | + | << entry_plug->Hmat[1][0] << "\t" |
461 | + | << entry_plug->Hmat[2][0] << ";\t" |
462 | ||
463 | < | finalOut << entry_plug->box_x << "\t" |
464 | < | << entry_plug->box_y << "\t" |
465 | < | << entry_plug->box_z << "\n"; |
463 | > | << entry_plug->Hmat[0][1] << "\t" |
464 | > | << entry_plug->Hmat[1][1] << "\t" |
465 | > | << entry_plug->Hmat[2][1] << ";\t" |
466 | > | |
467 | > | << entry_plug->Hmat[0][2] << "\t" |
468 | > | << entry_plug->Hmat[1][2] << "\t" |
469 | > | << entry_plug->Hmat[2][2] << ";\n"; |
470 | ||
471 | < | masterIndex = 0; |
472 | < | |
407 | < | for( i=0; i<nAtoms; i++ ){ |
471 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
472 | > | // Get the Node number which has this atom: |
473 | ||
474 | < | sprintf( tempBuffer, |
475 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
476 | < | atoms[i]->getType(), |
477 | < | atoms[i]->getX(), |
478 | < | atoms[i]->getY(), |
479 | < | atoms[i]->getZ(), |
480 | < | atoms[i]->get_vx(), |
481 | < | atoms[i]->get_vy(), |
482 | < | atoms[i]->get_vz()); |
483 | < | strcpy( writeLine, tempBuffer ); |
484 | < | |
485 | < | if( atoms[i]->isDirectional() ){ |
474 | > | which_node = AtomToProcMap[i]; |
475 | > | |
476 | > | if (which_node == mpiSim->getMyNode()) { |
477 | > | |
478 | > | which_atom = i; |
479 | > | local_index=-1; |
480 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
481 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
482 | > | } |
483 | > | if (local_index != -1) { |
484 | > | |
485 | > | atoms[local_index]->getPos(pos); |
486 | > | atoms[local_index]->getVel(vel); |
487 | ||
488 | < | dAtom = (DirectionalAtom *)atoms[i]; |
489 | < | dAtom->getQ( q ); |
488 | > | sprintf( tempBuffer, |
489 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
490 | > | atoms[local_index]->getType(), |
491 | > | pos[0], |
492 | > | pos[1], |
493 | > | pos[2], |
494 | > | vel[0], |
495 | > | vel[1], |
496 | > | vel[2]); |
497 | > | strcpy( writeLine, tempBuffer ); |
498 | ||
499 | < | sprintf( tempBuffer, |
500 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
501 | < | q[0], |
502 | < | q[1], |
503 | < | q[2], |
504 | < | q[3], |
505 | < | dAtom->getJx(), |
506 | < | dAtom->getJy(), |
507 | < | dAtom->getJz()); |
508 | < | strcat( writeLine, tempBuffer ); |
509 | < | } |
510 | < | else |
511 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
499 | > | if( atoms[local_index]->isDirectional() ){ |
500 | > | |
501 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
502 | > | dAtom->getQ( q ); |
503 | > | |
504 | > | sprintf( tempBuffer, |
505 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
506 | > | q[0], |
507 | > | q[1], |
508 | > | q[2], |
509 | > | q[3], |
510 | > | dAtom->getJx(), |
511 | > | dAtom->getJy(), |
512 | > | dAtom->getJz()); |
513 | > | strcat( writeLine, tempBuffer ); |
514 | > | } |
515 | > | else |
516 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
517 | > | } |
518 | > | else { |
519 | > | sprintf(painCave.errMsg, |
520 | > | "Atom %d not found on processor %d\n", |
521 | > | i, worldRank ); |
522 | > | haveError= 1; |
523 | > | simError(); |
524 | > | } |
525 | > | |
526 | > | if(haveError) nodeZeroError(); |
527 | > | |
528 | > | } |
529 | > | else { |
530 | > | |
531 | > | myStatus = 1; |
532 | > | MPI_Send(&myStatus, 1, MPI_INT, which_node, |
533 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
534 | > | MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
535 | > | MPI_COMM_WORLD); |
536 | > | MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
537 | > | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); |
538 | > | MPI_Recv(&myStatus, 1, MPI_INT, which_node, |
539 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
540 | ||
541 | + | if(!myStatus) nodeZeroError(); |
542 | + | } |
543 | + | |
544 | finalOut << writeLine; | |
440 | – | masterIndex++; |
545 | } | |
442 | – | finalOut.flush(); |
443 | – | } |
546 | ||
547 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
548 | < | procIndex++){ |
547 | > | // kill everyone off: |
548 | > | myStatus = -1; |
549 | > | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
550 | > | MPI_Send(&myStatus, 1, MPI_INT, j, |
551 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
552 | > | } |
553 | ||
554 | < | if( worldRank == 0 ){ |
555 | < | |
556 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
557 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
554 | > | } else { |
555 | > | |
556 | > | done = 0; |
557 | > | while (!done) { |
558 | ||
559 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
560 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
561 | < | |
562 | < | // Make sure where node 0 is writing to, matches where the |
563 | < | // receiving node expects it to be. |
564 | < | |
565 | < | if (masterIndex != nodeAtomsStart){ |
566 | < | sendError = 1; |
567 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
568 | < | MPI_COMM_WORLD); |
569 | < | sprintf(painCave.errMsg, |
570 | < | "DumpWriter error: atoms start index (%d) for " |
571 | < | "node %d not equal to master index (%d)", |
572 | < | nodeAtomsStart,procIndex,masterIndex ); |
467 | < | painCave.isFatal = 1; |
468 | < | simError(); |
559 | > | MPI_Recv(&myStatus, 1, MPI_INT, 0, |
560 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
561 | > | |
562 | > | if(!myStatus) anonymousNodeDie(); |
563 | > | |
564 | > | if(myStatus < 0) break; |
565 | > | |
566 | > | MPI_Recv(&which_atom, 1, MPI_INT, 0, |
567 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
568 | > | |
569 | > | myStatus = 1; |
570 | > | local_index=-1; |
571 | > | for (j=0; j < mpiSim->getMyNlocal(); j++) { |
572 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
573 | } | |
574 | < | |
471 | < | sendError = 0; |
472 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
473 | < | MPI_COMM_WORLD); |
574 | > | if (local_index != -1) { |
575 | ||
576 | < | // recieve the nodes writeLines |
576 | > | atoms[local_index]->getPos(pos); |
577 | > | atoms[local_index]->getVel(vel); |
578 | ||
579 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
478 | < | |
479 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
480 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
481 | < | |
482 | < | finalOut << writeLine; |
483 | < | masterIndex++; |
484 | < | } |
485 | < | |
486 | < | finalOut.flush(); |
487 | < | } |
488 | < | |
489 | < | else if( worldRank == procIndex ){ |
490 | < | |
491 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
492 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
493 | < | |
494 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
495 | < | MPI_COMM_WORLD); |
496 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
497 | < | MPI_COMM_WORLD); |
498 | < | |
499 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
500 | < | MPI_COMM_WORLD, istatus); |
501 | < | if (sendError) MPIcheckPoint(); |
502 | < | |
503 | < | // send current node's configuration line by line. |
504 | < | |
505 | < | for( i=0; i<nAtoms; i++ ){ |
506 | < | |
579 | > | //format the line |
580 | sprintf( tempBuffer, | |
581 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
582 | < | atoms[i]->getType(), |
583 | < | atoms[i]->getX(), |
584 | < | atoms[i]->getY(), |
585 | < | atoms[i]->getZ(), |
586 | < | atoms[i]->get_vx(), |
587 | < | atoms[i]->get_vy(), |
588 | < | atoms[i]->get_vz()); |
582 | > | atoms[local_index]->getType(), |
583 | > | pos[0], |
584 | > | pos[1], |
585 | > | pos[2], |
586 | > | vel[0], |
587 | > | vel[1], |
588 | > | vel[2]); // check here. |
589 | strcpy( writeLine, tempBuffer ); | |
590 | + | |
591 | + | if( atoms[local_index]->isDirectional() ){ |
592 | ||
593 | < | if( atoms[i]->isDirectional() ){ |
519 | < | |
520 | < | dAtom = (DirectionalAtom *)atoms[i]; |
593 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
594 | dAtom->getQ( q ); | |
595 | < | |
595 | > | |
596 | sprintf( tempBuffer, | |
597 | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", | |
598 | q[0], | |
# | Line 531 | Line 604 | void DumpWriter::writeFinal(){ | |
604 | dAtom->getJz()); | |
605 | strcat( writeLine, tempBuffer ); | |
606 | } | |
607 | < | else |
607 | > | else{ |
608 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
609 | < | |
610 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
611 | < | MPI_COMM_WORLD); |
609 | > | } |
610 | > | } |
611 | > | else { |
612 | > | sprintf(painCave.errMsg, |
613 | > | "Atom %d not found on processor %d\n", |
614 | > | which_atom, worldRank ); |
615 | > | myStatus = 0; |
616 | > | simError(); |
617 | > | |
618 | > | strcpy( writeLine, "Hello, I'm an error.\n"); |
619 | } | |
620 | + | |
621 | + | MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
622 | + | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); |
623 | + | MPI_Send( &myStatus, 1, MPI_INT, 0, |
624 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
625 | } | |
541 | – | |
542 | – | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
543 | – | procIndex); |
544 | – | MPIcheckPoint(); |
626 | } | |
627 | + | finalOut.flush(); |
628 | + | sprintf( checkPointMsg, |
629 | + | "Sucessfully took a dump.\n"); |
630 | + | MPIcheckPoint(); |
631 | + | |
632 | + | if( worldRank == 0 ) finalOut.close(); |
633 | + | #endif // is_mpi |
634 | + | } |
635 | ||
547 | – | if( worldRank == 0 ) finalOut.close(); |
636 | ||
637 | < | |
638 | < | #endif // is_mpi |
637 | > | |
638 | > | #ifdef IS_MPI |
639 | > | |
640 | > | // a couple of functions to let us escape the write loop |
641 | > | |
642 | > | void dWrite::nodeZeroError( void ){ |
643 | > | int j, myStatus; |
644 | > | |
645 | > | myStatus = 0; |
646 | > | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
647 | > | MPI_Send( &myStatus, 1, MPI_INT, j, |
648 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
649 | > | } |
650 | > | |
651 | > | |
652 | > | MPI_Finalize(); |
653 | > | exit (0); |
654 | > | |
655 | } | |
656 | + | |
657 | + | void dWrite::anonymousNodeDie( void ){ |
658 | + | |
659 | + | MPI_Finalize(); |
660 | + | exit (0); |
661 | + | } |
662 | + | |
663 | + | #endif //is_mpi |
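// Usage sketch (editorial; not part of this file). The driver loop and the
// names nSteps, dt, sampleInterval and advanceOneStep() are hypothetical:
//
//   DumpWriter writer( entry_plug );           // opens entry_plug->sampleName
//   for( int step = 0; step < nSteps; step++ ){
//     advanceOneStep();                        // hypothetical integrator call
//     if( !(step % sampleInterval) )
//       writer.writeDump( step * dt );         // append one frame
//   }
//   writer.writeFinal( nSteps * dt );          // write the final configuration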
– | Removed lines |
+ | Added lines |
< | Changed lines (old revision)
> | Changed lines (new revision)