# | Line 1 | Line 1 | |
1 | + | #define _FILE_OFFSET_BITS 64 |
2 | + | |
3 | #include <cstring> | |
4 | #include <iostream> | |
5 | #include <fstream> | |
# | Line 5 | Line 7 | |
7 | #ifdef IS_MPI | |
8 | #include <mpi.h> | |
9 | #include "mpiSimulation.hpp" | |
10 | < | #define TAKE_THIS_TAG 0 |
10 | > | #define TAKE_THIS_TAG_CHAR 1 |
11 | > | #define TAKE_THIS_TAG_INT 2 |
12 | > | |
13 | > | namespace dWrite{ |
14 | > | void nodeZeroError( void ); |
15 | > | void anonymousNodeDie( void ); |
16 | > | } |
17 | > | |
18 | > | using namespace dWrite; |
19 | #endif //is_mpi | |
20 | ||
21 | #include "ReadWrite.hpp" | |
22 | #include "simError.h" | |
23 | ||
14 | – | |
15 | – | |
16 | – | |
17 | – | |
24 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
25 | ||
26 | entry_plug = the_entry_plug; | |
# | Line 23 | Line 29 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
29 | if(worldRank == 0 ){ | |
30 | #endif // is_mpi | |
31 | ||
26 | – | |
27 | – | |
32 | strcpy( outName, entry_plug->sampleName ); | |
33 | ||
34 | outFile.open(outName, ios::out | ios::trunc ); | |
# | Line 37 | Line 41 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
41 | painCave.isFatal = 1; | |
42 | simError(); | |
43 | } | |
44 | < | |
44 | > | |
45 | //outFile.setf( ios::scientific ); | |
46 | ||
47 | #ifdef IS_MPI | |
# | Line 69 | Line 73 | void DumpWriter::writeDump( double currentTime ){ | |
73 | char writeLine[BUFFERSIZE]; | |
74 | ||
75 | int i; | |
76 | + | #ifdef IS_MPI |
77 | + | int j, which_node, done, which_atom, local_index; |
78 | + | #else //is_mpi |
79 | + | int nAtoms = entry_plug->n_atoms; |
80 | + | #endif //is_mpi |
81 | + | |
82 | double q[4]; | |
83 | DirectionalAtom* dAtom; | |
74 | – | int nAtoms = entry_plug->n_atoms; |
84 | Atom** atoms = entry_plug->atoms; | |
85 | + | double pos[3], vel[3]; |
86 | ||
87 | + | |
88 | + | // write the current frame to the eor file |
89 | ||
90 | + | this->writeFinal( currentTime ); |
91 | + | |
92 | #ifndef IS_MPI | |
93 | ||
94 | outFile << nAtoms << "\n"; | |
95 | ||
96 | < | outFile << currentTime << "\t" |
97 | < | << entry_plug->box_x << "\t" |
98 | < | << entry_plug->box_y << "\t" |
99 | < | << entry_plug->box_z << "\n"; |
96 | > | outFile << currentTime << ";\t" |
97 | > | << entry_plug->Hmat[0][0] << "\t" |
98 | > | << entry_plug->Hmat[1][0] << "\t" |
99 | > | << entry_plug->Hmat[2][0] << ";\t" |
100 | > | |
101 | > | << entry_plug->Hmat[0][1] << "\t" |
102 | > | << entry_plug->Hmat[1][1] << "\t" |
103 | > | << entry_plug->Hmat[2][1] << ";\t" |
104 | > | |
105 | > | << entry_plug->Hmat[0][2] << "\t" |
106 | > | << entry_plug->Hmat[1][2] << "\t" |
107 | > | << entry_plug->Hmat[2][2] << ";\n"; |
108 | ||
109 | for( i=0; i<nAtoms; i++ ){ | |
110 | ||
111 | + | atoms[i]->getPos(pos); |
112 | + | atoms[i]->getVel(vel); |
113 | ||
114 | sprintf( tempBuffer, | |
115 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
116 | atoms[i]->getType(), | |
117 | < | atoms[i]->getX(), |
118 | < | atoms[i]->getY(), |
119 | < | atoms[i]->getZ(), |
120 | < | atoms[i]->get_vx(), |
121 | < | atoms[i]->get_vy(), |
122 | < | atoms[i]->get_vz()); |
117 | > | pos[0], |
118 | > | pos[1], |
119 | > | pos[2], |
120 | > | vel[0], |
121 | > | vel[1], |
122 | > | vel[2]); |
123 | strcpy( writeLine, tempBuffer ); | |
124 | ||
125 | if( atoms[i]->isDirectional() ){ | |
# | Line 123 | Line 147 | void DumpWriter::writeDump( double currentTime ){ | |
147 | ||
148 | #else // is_mpi | |
149 | ||
150 | < | int masterIndex; |
151 | < | int nodeAtomsStart; |
128 | < | int nodeAtomsEnd; |
129 | < | int mpiErr; |
130 | < | int sendError; |
131 | < | int procIndex; |
132 | < | |
133 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
150 | > | // first things first, suspend fatalities.
151 | > | painCave.isEventLoop = 1; |
152 | ||
153 | < | |
154 | < | // write out header and node 0's coordinates |
153 | > | int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone |
154 | > | int haveError; |
155 | ||
156 | + | MPI_Status istatus; |
157 | + | int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
158 | + | |
159 | + | // write out header and node 0's coordinates |
160 | + | |
161 | if( worldRank == 0 ){ | |
162 | outFile << mpiSim->getTotAtoms() << "\n"; | |
163 | + | |
164 | + | outFile << currentTime << ";\t" |
165 | + | << entry_plug->Hmat[0][0] << "\t" |
166 | + | << entry_plug->Hmat[1][0] << "\t" |
167 | + | << entry_plug->Hmat[2][0] << ";\t" |
168 | ||
169 | < | outFile << currentTime << "\t" |
170 | < | << entry_plug->box_x << "\t" |
171 | < | << entry_plug->box_y << "\t" |
144 | < | << entry_plug->box_z << "\n"; |
145 | < | |
146 | < | masterIndex = 0; |
147 | < | for( i=0; i<nAtoms; i++ ){ |
169 | > | << entry_plug->Hmat[0][1] << "\t" |
170 | > | << entry_plug->Hmat[1][1] << "\t" |
171 | > | << entry_plug->Hmat[2][1] << ";\t" |
172 | ||
173 | < | sprintf( tempBuffer, |
174 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
175 | < | atoms[i]->getType(), |
152 | < | atoms[i]->getX(), |
153 | < | atoms[i]->getY(), |
154 | < | atoms[i]->getZ(), |
155 | < | atoms[i]->get_vx(), |
156 | < | atoms[i]->get_vy(), |
157 | < | atoms[i]->get_vz()); |
158 | < | strcpy( writeLine, tempBuffer ); |
159 | < | |
160 | < | if( atoms[i]->isDirectional() ){ |
161 | < | |
162 | < | dAtom = (DirectionalAtom *)atoms[i]; |
163 | < | dAtom->getQ( q ); |
164 | < | |
165 | < | sprintf( tempBuffer, |
166 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
167 | < | q[0], |
168 | < | q[1], |
169 | < | q[2], |
170 | < | q[3], |
171 | < | dAtom->getJx(), |
172 | < | dAtom->getJy(), |
173 | < | dAtom->getJz()); |
174 | < | strcat( writeLine, tempBuffer ); |
175 | < | } |
176 | < | else |
177 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
178 | < | |
179 | < | outFile << writeLine; |
180 | < | masterIndex++; |
181 | < | } |
182 | < | outFile.flush(); |
183 | < | } |
184 | < | |
185 | < | sprintf( checkPointMsg, |
186 | < | "Sucessfully wrote node 0's dump configuration.\n"); |
187 | < | MPIcheckPoint(); |
173 | > | << entry_plug->Hmat[0][2] << "\t" |
174 | > | << entry_plug->Hmat[1][2] << "\t" |
175 | > | << entry_plug->Hmat[2][2] << ";\n"; |
176 | ||
177 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
178 | < | procIndex++){ |
179 | < | |
192 | < | if( worldRank == 0 ){ |
177 | > | outFile.flush(); |
178 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
179 | > | // Get the Node number which has this atom; |
180 | ||
181 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
195 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
181 | > | which_node = AtomToProcMap[i]; |
182 | ||
183 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
198 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
199 | < | |
200 | < | // Make sure where node 0 is writing to, matches where the |
201 | < | // receiving node expects it to be. |
202 | < | |
203 | < | if (masterIndex != nodeAtomsStart){ |
204 | < | sendError = 1; |
205 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
206 | < | MPI_COMM_WORLD); |
207 | < | sprintf(painCave.errMsg, |
208 | < | "DumpWriter error: atoms start index (%d) for " |
209 | < | "node %d not equal to master index (%d)", |
210 | < | nodeAtomsStart,procIndex,masterIndex ); |
211 | < | painCave.isFatal = 1; |
212 | < | simError(); |
213 | < | } |
214 | < | |
215 | < | sendError = 0; |
216 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
217 | < | MPI_COMM_WORLD); |
218 | < | |
219 | < | // recieve the nodes writeLines |
220 | < | |
221 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
183 | > | if (which_node == 0 ) { |
184 | ||
185 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
186 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
187 | < | |
188 | < | outFile << writeLine; |
189 | < | masterIndex++; |
185 | > | haveError = 0; |
186 | > | which_atom = i; |
187 | > | local_index=-1; |
188 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
189 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
190 | > | } |
191 | > | if (local_index != -1) { |
192 | > | //format the line |
193 | > | |
194 | > | atoms[local_index]->getPos(pos); |
195 | > | atoms[local_index]->getVel(vel); |
196 | > | |
197 | > | sprintf( tempBuffer, |
198 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
199 | > | atoms[local_index]->getType(), |
200 | > | pos[0], |
201 | > | pos[1], |
202 | > | pos[2], |
203 | > | vel[0], |
204 | > | vel[1], |
205 | > | vel[2]); // check here. |
206 | > | strcpy( writeLine, tempBuffer ); |
207 | > | |
208 | > | if( atoms[local_index]->isDirectional() ){ |
209 | > | |
210 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
211 | > | dAtom->getQ( q ); |
212 | > | |
213 | > | sprintf( tempBuffer, |
214 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
215 | > | q[0], |
216 | > | q[1], |
217 | > | q[2], |
218 | > | q[3], |
219 | > | dAtom->getJx(), |
220 | > | dAtom->getJy(), |
221 | > | dAtom->getJz()); |
222 | > | strcat( writeLine, tempBuffer ); |
223 | > | |
224 | > | } |
225 | > | else |
226 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
227 | > | } |
228 | > | else { |
229 | > | sprintf(painCave.errMsg, |
230 | > | "Atom %d not found on processor %d\n", |
231 | > | i, worldRank ); |
232 | > | haveError= 1; |
233 | > | simError(); |
234 | > | } |
235 | > | |
236 | > | if(haveError) nodeZeroError(); |
237 | > | |
238 | } | |
239 | + | else { |
240 | + | myStatus = 1; |
241 | + | MPI_Send(&myStatus, 1, MPI_INT, which_node, |
242 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
243 | + | MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
244 | + | MPI_COMM_WORLD); |
245 | + | MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
246 | + | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); |
247 | + | MPI_Recv(&myStatus, 1, MPI_INT, which_node, |
248 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
249 | + | |
250 | + | if(!myStatus) nodeZeroError(); |
251 | + | |
252 | + | } |
253 | + | |
254 | + | outFile << writeLine; |
255 | + | outFile.flush(); |
256 | } | |
257 | + | |
258 | + | // kill everyone off: |
259 | + | myStatus = -1; |
260 | + | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
261 | + | MPI_Send(&myStatus, 1, MPI_INT, j, |
262 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
263 | + | } |
264 | ||
265 | < | else if( worldRank == procIndex ){ |
265 | > | } else { |
266 | > | |
267 | > | done = 0; |
268 | > | while (!done) { |
269 | > | |
270 | > | MPI_Recv(&myStatus, 1, MPI_INT, 0, |
271 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
272 | ||
273 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
274 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
273 | > | if(!myStatus) anonymousNodeDie(); |
274 | > | |
275 | > | if(myStatus < 0) break; |
276 | ||
277 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
278 | < | MPI_COMM_WORLD); |
279 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
280 | < | MPI_COMM_WORLD); |
281 | < | |
282 | < | sendError = -1; |
283 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
284 | < | MPI_COMM_WORLD, istatus); |
277 | > | MPI_Recv(&which_atom, 1, MPI_INT, 0, |
278 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
279 | > | |
280 | > | myStatus = 1; |
281 | > | local_index=-1; |
282 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
283 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
284 | > | } |
285 | > | if (local_index != -1) { |
286 | > | //format the line |
287 | ||
288 | < | if (sendError) MPIcheckPoint(); |
288 | > | atoms[local_index]->getPos(pos); |
289 | > | atoms[local_index]->getVel(vel); |
290 | ||
247 | – | // send current node's configuration line by line. |
248 | – | |
249 | – | for( i=0; i<nAtoms; i++ ){ |
250 | – | |
291 | sprintf( tempBuffer, | |
292 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
293 | < | atoms[i]->getType(), |
294 | < | atoms[i]->getX(), |
295 | < | atoms[i]->getY(), |
296 | < | atoms[i]->getZ(), |
297 | < | atoms[i]->get_vx(), |
298 | < | atoms[i]->get_vy(), |
299 | < | atoms[i]->get_vz()); // check here. |
293 | > | atoms[local_index]->getType(), |
294 | > | pos[0], |
295 | > | pos[1], |
296 | > | pos[2], |
297 | > | vel[0], |
298 | > | vel[1], |
299 | > | vel[2]); // check here. |
300 | strcpy( writeLine, tempBuffer ); | |
301 | + | |
302 | + | if( atoms[local_index]->isDirectional() ){ |
303 | ||
304 | < | if( atoms[i]->isDirectional() ){ |
263 | < | |
264 | < | dAtom = (DirectionalAtom *)atoms[i]; |
304 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
305 | dAtom->getQ( q ); | |
306 | < | |
306 | > | |
307 | sprintf( tempBuffer, | |
308 | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", | |
309 | q[0], | |
# | Line 275 | Line 315 | void DumpWriter::writeDump( double currentTime ){ | |
315 | dAtom->getJz()); | |
316 | strcat( writeLine, tempBuffer ); | |
317 | } | |
318 | < | else |
318 | > | else{ |
319 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
320 | < | |
321 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
322 | < | MPI_COMM_WORLD); |
320 | > | } |
321 | > | } |
322 | > | else { |
323 | > | sprintf(painCave.errMsg, |
324 | > | "Atom %d not found on processor %d\n", |
325 | > | which_atom, worldRank ); |
326 | > | myStatus = 0; |
327 | > | simError(); |
328 | > | |
329 | > | strcpy( writeLine, "Hello, I'm an error.\n"); |
330 | } | |
331 | + | |
332 | + | MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
333 | + | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); |
334 | + | MPI_Send( &myStatus, 1, MPI_INT, 0, |
335 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
336 | } | |
337 | < | |
338 | < | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
339 | < | procIndex); |
340 | < | MPIcheckPoint(); |
341 | < | } |
342 | < | |
337 | > | } |
338 | > | outFile.flush(); |
339 | > | sprintf( checkPointMsg, |
340 | > | "Sucessfully took a dump.\n"); |
341 | > | MPIcheckPoint(); |
342 | > | |
343 | > | // last things last, enable fatalities.
344 | > | painCave.isEventLoop = 0; |
345 | > | |
346 | #endif // is_mpi | |
347 | } | |
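The MPI branch above replaces the old start/end-index handoff with a per-atom request/reply handshake between node 0 and the owning processor, driven by the status convention declared with myStatus (1 = wakeup & success; 0 = error; -1 = all done). Below is a minimal, self-contained sketch of that handshake for readers who want it outside the diff. It is an illustration only: ownerOfAtom, formatLine, totAtoms, and the tag constants are placeholder names standing in for AtomToProcMap, the sprintf formatting, getTotAtoms(), and the TAKE_THIS_TAG_* defines; they are not part of the OOPSE sources.

```cpp
#include <mpi.h>
#include <cstdio>

static const int TAG_CHAR = 1;   // formatted text lines (cf. TAKE_THIS_TAG_CHAR)
static const int TAG_INT  = 2;   // control/status integers (cf. TAKE_THIS_TAG_INT)
static const int BUFSZ    = 2000;

int ownerOfAtom(int i, int nprocs) { return i % nprocs; }  // stand-in for AtomToProcMap[i]
void formatLine(int i, char* line) { std::snprintf(line, BUFSZ, "atom %d\n", i); }

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, nprocs;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

  const int totAtoms = 8;          // stand-in for mpiSim->getTotAtoms()
  char line[BUFSZ];
  MPI_Status istatus;

  if (rank == 0) {
    for (int i = 0; i < totAtoms; i++) {
      int owner = ownerOfAtom(i, nprocs);
      if (owner == 0) {
        formatLine(i, line);                     // node 0 formats its own atoms locally
      } else {
        int status = 1;                          // 1 = wake up, a request follows
        MPI_Send(&status, 1, MPI_INT, owner, TAG_INT, MPI_COMM_WORLD);
        MPI_Send(&i, 1, MPI_INT, owner, TAG_INT, MPI_COMM_WORLD);
        MPI_Recv(line, BUFSZ, MPI_CHAR, owner, TAG_CHAR, MPI_COMM_WORLD, &istatus);
        MPI_Recv(&status, 1, MPI_INT, owner, TAG_INT, MPI_COMM_WORLD, &istatus);
        // status == 0 here is the worker's "atom not found" error report
      }
      std::printf("%s", line);                   // stands in for outFile << writeLine
    }
    int status = -1;                             // -1 = all done, workers may leave the loop
    for (int p = 1; p < nprocs; p++)
      MPI_Send(&status, 1, MPI_INT, p, TAG_INT, MPI_COMM_WORLD);
  } else {
    for (;;) {
      int status, which;
      MPI_Recv(&status, 1, MPI_INT, 0, TAG_INT, MPI_COMM_WORLD, &istatus);
      if (status < 0) break;                     // shutdown signal from node 0
      MPI_Recv(&which, 1, MPI_INT, 0, TAG_INT, MPI_COMM_WORLD, &istatus);
      formatLine(which, line);
      status = 1;
      MPI_Send(line, BUFSZ, MPI_CHAR, 0, TAG_CHAR, MPI_COMM_WORLD);
      MPI_Send(&status, 1, MPI_INT, 0, TAG_INT, MPI_COMM_WORLD);
    }
  }

  MPI_Finalize();
  return 0;
}
```

As in the code above, the integer control messages and the character buffer travel under separate tags, so a status word can never be mistaken for line data.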
348 | ||
349 | + | void DumpWriter::writeFinal(double finalTime){ |
350 | ||
351 | + | char finalName[500]; |
352 | + | ofstream finalOut; |
353 | ||
296 | – | void DumpWriter::writeFinal(){ |
297 | – | |
298 | – | |
354 | const int BUFFERSIZE = 2000; | |
355 | < | char tempBuffer[500]; |
356 | < | char writeLine[BUFFERSIZE]; |
302 | < | |
303 | < | char finalName[500]; |
355 | > | char tempBuffer[BUFFERSIZE]; |
356 | > | char writeLine[BUFFERSIZE]; |
357 | ||
305 | – | int i; |
358 | double q[4]; | |
359 | DirectionalAtom* dAtom; | |
308 | – | int nAtoms = entry_plug->n_atoms; |
360 | Atom** atoms = entry_plug->atoms; | |
361 | + | int i; |
362 | + | #ifdef IS_MPI |
363 | + | int j, which_node, done, which_atom, local_index; |
364 | + | #else //is_mpi |
365 | + | int nAtoms = entry_plug->n_atoms; |
366 | + | #endif //is_mpi |
367 | ||
368 | < | ofstream finalOut; |
368 | > | double pos[3], vel[3]; |
369 | ||
370 | #ifdef IS_MPI | |
371 | if(worldRank == 0 ){ | |
# | Line 335 | Line 392 | void DumpWriter::writeFinal(){ | |
392 | ||
393 | #endif //is_mpi | |
394 | ||
395 | < | |
339 | < | |
395 | > | |
396 | #ifndef IS_MPI | |
397 | ||
398 | finalOut << nAtoms << "\n"; | |
399 | ||
400 | < | finalOut << entry_plug->box_x << "\t" |
401 | < | << entry_plug->box_y << "\t" |
402 | < | << entry_plug->box_z << "\n"; |
400 | > | finalOut << finalTime << ";\t" |
401 | > | << entry_plug->Hmat[0][0] << "\t" |
402 | > | << entry_plug->Hmat[1][0] << "\t" |
403 | > | << entry_plug->Hmat[2][0] << ";\t" |
404 | > | |
405 | > | << entry_plug->Hmat[0][1] << "\t" |
406 | > | << entry_plug->Hmat[1][1] << "\t" |
407 | > | << entry_plug->Hmat[2][1] << ";\t" |
408 | ||
409 | + | << entry_plug->Hmat[0][2] << "\t" |
410 | + | << entry_plug->Hmat[1][2] << "\t" |
411 | + | << entry_plug->Hmat[2][2] << ";\n"; |
412 | + | |
413 | for( i=0; i<nAtoms; i++ ){ | |
414 | ||
415 | + | atoms[i]->getPos(pos); |
416 | + | atoms[i]->getVel(vel); |
417 | + | |
418 | sprintf( tempBuffer, | |
419 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
420 | atoms[i]->getType(), | |
421 | < | atoms[i]->getX(), |
422 | < | atoms[i]->getY(), |
423 | < | atoms[i]->getZ(), |
424 | < | atoms[i]->get_vx(), |
425 | < | atoms[i]->get_vy(), |
426 | < | atoms[i]->get_vz()); |
421 | > | pos[0], |
422 | > | pos[1], |
423 | > | pos[2], |
424 | > | vel[0], |
425 | > | vel[1], |
426 | > | vel[2]); |
427 | strcpy( writeLine, tempBuffer ); | |
428 | ||
429 | if( atoms[i]->isDirectional() ){ | |
# | Line 380 | Line 448 | void DumpWriter::writeFinal(){ | |
448 | finalOut << writeLine; | |
449 | } | |
450 | finalOut.flush(); | |
451 | + | finalOut.close(); |
452 | ||
453 | #else // is_mpi | |
454 | + | |
455 | + | // first things first, suspend fatalities. |
456 | + | painCave.isEventLoop = 1; |
457 | ||
458 | < | int masterIndex; |
459 | < | int nodeAtomsStart; |
388 | < | int nodeAtomsEnd; |
389 | < | int mpiErr; |
390 | < | int sendError; |
391 | < | int procIndex; |
392 | < | |
393 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
458 | > | int myStatus; // 1 = wakeup & success; 0 = error; -1 = AllDone |
459 | > | int haveError; |
460 | ||
461 | < | |
462 | < | // write out header and node 0's coordinates |
461 | > | MPI_Status istatus; |
462 | > | int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
463 | ||
464 | + | // write out header and node 0's coordinates |
465 | + | |
466 | + | haveError = 0; |
467 | if( worldRank == 0 ){ | |
468 | finalOut << mpiSim->getTotAtoms() << "\n"; | |
469 | + | |
470 | + | finalOut << finalTime << ";\t" |
471 | + | << entry_plug->Hmat[0][0] << "\t" |
472 | + | << entry_plug->Hmat[1][0] << "\t" |
473 | + | << entry_plug->Hmat[2][0] << ";\t" |
474 | ||
475 | < | finalOut << entry_plug->box_x << "\t" |
476 | < | << entry_plug->box_y << "\t" |
477 | < | << entry_plug->box_z << "\n"; |
475 | > | << entry_plug->Hmat[0][1] << "\t" |
476 | > | << entry_plug->Hmat[1][1] << "\t" |
477 | > | << entry_plug->Hmat[2][1] << ";\t" |
478 | > | |
479 | > | << entry_plug->Hmat[0][2] << "\t" |
480 | > | << entry_plug->Hmat[1][2] << "\t" |
481 | > | << entry_plug->Hmat[2][2] << ";\n"; |
482 | ||
483 | < | masterIndex = 0; |
484 | < | |
407 | < | for( i=0; i<nAtoms; i++ ){ |
483 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
484 | > | // Get the node number which has this atom:
485 | ||
486 | < | sprintf( tempBuffer, |
487 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
488 | < | atoms[i]->getType(), |
489 | < | atoms[i]->getX(), |
490 | < | atoms[i]->getY(), |
491 | < | atoms[i]->getZ(), |
492 | < | atoms[i]->get_vx(), |
493 | < | atoms[i]->get_vy(), |
494 | < | atoms[i]->get_vz()); |
495 | < | strcpy( writeLine, tempBuffer ); |
496 | < | |
497 | < | if( atoms[i]->isDirectional() ){ |
486 | > | which_node = AtomToProcMap[i]; |
487 | > | |
488 | > | if (which_node == mpiSim->getMyNode()) { |
489 | > | |
490 | > | which_atom = i; |
491 | > | local_index=-1; |
492 | > | for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
493 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
494 | > | } |
495 | > | if (local_index != -1) { |
496 | > | |
497 | > | atoms[local_index]->getPos(pos); |
498 | > | atoms[local_index]->getVel(vel); |
499 | ||
500 | < | dAtom = (DirectionalAtom *)atoms[i]; |
501 | < | dAtom->getQ( q ); |
500 | > | sprintf( tempBuffer, |
501 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
502 | > | atoms[local_index]->getType(), |
503 | > | pos[0], |
504 | > | pos[1], |
505 | > | pos[2], |
506 | > | vel[0], |
507 | > | vel[1], |
508 | > | vel[2]); |
509 | > | strcpy( writeLine, tempBuffer ); |
510 | ||
511 | < | sprintf( tempBuffer, |
512 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
513 | < | q[0], |
514 | < | q[1], |
515 | < | q[2], |
516 | < | q[3], |
517 | < | dAtom->getJx(), |
518 | < | dAtom->getJy(), |
519 | < | dAtom->getJz()); |
520 | < | strcat( writeLine, tempBuffer ); |
521 | < | } |
522 | < | else |
523 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
511 | > | if( atoms[local_index]->isDirectional() ){ |
512 | > | |
513 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
514 | > | dAtom->getQ( q ); |
515 | > | |
516 | > | sprintf( tempBuffer, |
517 | > | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
518 | > | q[0], |
519 | > | q[1], |
520 | > | q[2], |
521 | > | q[3], |
522 | > | dAtom->getJx(), |
523 | > | dAtom->getJy(), |
524 | > | dAtom->getJz()); |
525 | > | strcat( writeLine, tempBuffer ); |
526 | > | } |
527 | > | else |
528 | > | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
529 | > | } |
530 | > | else { |
531 | > | sprintf(painCave.errMsg, |
532 | > | "Atom %d not found on processor %d\n", |
533 | > | i, worldRank ); |
534 | > | haveError= 1; |
535 | > | simError(); |
536 | > | } |
537 | > | |
538 | > | if(haveError) nodeZeroError(); |
539 | > | |
540 | > | } |
541 | > | else { |
542 | > | |
543 | > | myStatus = 1; |
544 | > | MPI_Send(&myStatus, 1, MPI_INT, which_node, |
545 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
546 | > | MPI_Send(&i, 1, MPI_INT, which_node, TAKE_THIS_TAG_INT, |
547 | > | MPI_COMM_WORLD); |
548 | > | MPI_Recv(writeLine, BUFFERSIZE, MPI_CHAR, which_node, |
549 | > | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD, &istatus); |
550 | > | MPI_Recv(&myStatus, 1, MPI_INT, which_node, |
551 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
552 | ||
553 | + | if(!myStatus) nodeZeroError(); |
554 | + | } |
555 | + | |
556 | finalOut << writeLine; | |
440 | – | masterIndex++; |
557 | } | |
442 | – | finalOut.flush(); |
443 | – | } |
558 | ||
559 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
560 | < | procIndex++){ |
559 | > | // kill everyone off: |
560 | > | myStatus = -1; |
561 | > | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
562 | > | MPI_Send(&myStatus, 1, MPI_INT, j, |
563 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
564 | > | } |
565 | ||
566 | < | if( worldRank == 0 ){ |
567 | < | |
568 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
569 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
566 | > | } else { |
567 | > | |
568 | > | done = 0; |
569 | > | while (!done) { |
570 | ||
571 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
572 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
573 | < | |
574 | < | // Make sure where node 0 is writing to, matches where the |
575 | < | // receiving node expects it to be. |
576 | < | |
577 | < | if (masterIndex != nodeAtomsStart){ |
578 | < | sendError = 1; |
579 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
580 | < | MPI_COMM_WORLD); |
581 | < | sprintf(painCave.errMsg, |
582 | < | "DumpWriter error: atoms start index (%d) for " |
583 | < | "node %d not equal to master index (%d)", |
584 | < | nodeAtomsStart,procIndex,masterIndex ); |
467 | < | painCave.isFatal = 1; |
468 | < | simError(); |
571 | > | MPI_Recv(&myStatus, 1, MPI_INT, 0, |
572 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
573 | > | |
574 | > | if(!myStatus) anonymousNodeDie(); |
575 | > | |
576 | > | if(myStatus < 0) break; |
577 | > | |
578 | > | MPI_Recv(&which_atom, 1, MPI_INT, 0, |
579 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD, &istatus); |
580 | > | |
581 | > | myStatus = 1; |
582 | > | local_index=-1; |
583 | > | for (j=0; j < mpiSim->getMyNlocal(); j++) { |
584 | > | if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
585 | } | |
586 | < | |
471 | < | sendError = 0; |
472 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
473 | < | MPI_COMM_WORLD); |
586 | > | if (local_index != -1) { |
587 | ||
588 | < | // recieve the nodes writeLines |
588 | > | atoms[local_index]->getPos(pos); |
589 | > | atoms[local_index]->getVel(vel); |
590 | ||
591 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
478 | < | |
479 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
480 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
481 | < | |
482 | < | finalOut << writeLine; |
483 | < | masterIndex++; |
484 | < | } |
485 | < | |
486 | < | finalOut.flush(); |
487 | < | } |
488 | < | |
489 | < | else if( worldRank == procIndex ){ |
490 | < | |
491 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
492 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
493 | < | |
494 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
495 | < | MPI_COMM_WORLD); |
496 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
497 | < | MPI_COMM_WORLD); |
498 | < | |
499 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
500 | < | MPI_COMM_WORLD, istatus); |
501 | < | if (sendError) MPIcheckPoint(); |
502 | < | |
503 | < | // send current node's configuration line by line. |
504 | < | |
505 | < | for( i=0; i<nAtoms; i++ ){ |
506 | < | |
591 | > | //format the line |
592 | sprintf( tempBuffer, | |
593 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
594 | < | atoms[i]->getType(), |
595 | < | atoms[i]->getX(), |
596 | < | atoms[i]->getY(), |
597 | < | atoms[i]->getZ(), |
598 | < | atoms[i]->get_vx(), |
599 | < | atoms[i]->get_vy(), |
600 | < | atoms[i]->get_vz()); |
594 | > | atoms[local_index]->getType(), |
595 | > | pos[0], |
596 | > | pos[1], |
597 | > | pos[2], |
598 | > | vel[0], |
599 | > | vel[1], |
600 | > | vel[2]); // check here. |
601 | strcpy( writeLine, tempBuffer ); | |
602 | + | |
603 | + | if( atoms[local_index]->isDirectional() ){ |
604 | ||
605 | < | if( atoms[i]->isDirectional() ){ |
519 | < | |
520 | < | dAtom = (DirectionalAtom *)atoms[i]; |
605 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
606 | dAtom->getQ( q ); | |
607 | < | |
607 | > | |
608 | sprintf( tempBuffer, | |
609 | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", | |
610 | q[0], | |
# | Line 531 | Line 616 | void DumpWriter::writeFinal(){ | |
616 | dAtom->getJz()); | |
617 | strcat( writeLine, tempBuffer ); | |
618 | } | |
619 | < | else |
619 | > | else{ |
620 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
621 | < | |
622 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
623 | < | MPI_COMM_WORLD); |
621 | > | } |
622 | > | } |
623 | > | else { |
624 | > | sprintf(painCave.errMsg, |
625 | > | "Atom %d not found on processor %d\n", |
626 | > | which_atom, worldRank ); |
627 | > | myStatus = 0; |
628 | > | simError(); |
629 | > | |
630 | > | strcpy( writeLine, "Hello, I'm an error.\n"); |
631 | } | |
632 | + | |
633 | + | MPI_Send(writeLine, BUFFERSIZE, MPI_CHAR, 0, |
634 | + | TAKE_THIS_TAG_CHAR, MPI_COMM_WORLD); |
635 | + | MPI_Send( &myStatus, 1, MPI_INT, 0, |
636 | + | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
637 | } | |
541 | – | |
542 | – | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
543 | – | procIndex); |
544 | – | MPIcheckPoint(); |
638 | } | |
639 | + | finalOut.flush(); |
640 | + | sprintf( checkPointMsg, |
641 | + | "Sucessfully took a dump.\n"); |
642 | + | MPIcheckPoint(); |
643 | + | |
644 | + | if( worldRank == 0 ) finalOut.close(); |
645 | + | #endif // is_mpi |
646 | + | } |
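Both writeDump and writeFinal repeat the same global-to-local lookup: a linear scan over this processor's getMyNlocal() atoms, comparing getGlobalIndex() against the requested global index. A minimal sketch of that scan follows; AtomLike and findLocalIndex are hypothetical names invented for this example, not part of the OOPSE sources.

```cpp
#include <vector>
#include <cstddef>

// Hypothetical stand-in for the real Atom class; only the global index matters here.
struct AtomLike {
  int globalIndex;
  int getGlobalIndex() const { return globalIndex; }
};

// Return the local slot holding the atom with the given global index,
// or -1 if this processor does not own it.
int findLocalIndex(const std::vector<AtomLike*>& atoms, int whichAtom) {
  for (std::size_t j = 0; j < atoms.size(); j++)
    if (atoms[j]->getGlobalIndex() == whichAtom)
      return static_cast<int>(j);
  return -1;
}
```

A return value of -1 corresponds to the "Atom %d not found on processor %d" error branch in both writers.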
647 | ||
547 | – | if( worldRank == 0 ) finalOut.close(); |
648 | ||
649 | < | |
650 | < | #endif // is_mpi |
649 | > | |
650 | > | #ifdef IS_MPI |
651 | > | |
652 | > | // a couple of functions to let us escape the write loop |
653 | > | |
654 | > | void dWrite::nodeZeroError( void ){ |
655 | > | int j, myStatus; |
656 | > | |
657 | > | myStatus = 0; |
658 | > | for (j = 0; j < mpiSim->getNumberProcessors(); j++) { |
659 | > | MPI_Send( &myStatus, 1, MPI_INT, j, |
660 | > | TAKE_THIS_TAG_INT, MPI_COMM_WORLD); |
661 | > | } |
662 | > | |
663 | > | |
664 | > | MPI_Finalize(); |
665 | > | exit (0); |
666 | > | |
667 | } | |
668 | + | |
669 | + | void dWrite::anonymousNodeDie( void ){ |
670 | + | |
671 | + | MPI_Finalize(); |
672 | + | exit (0); |
673 | + | } |
674 | + | |
675 | + | #endif //is_mpi |
Legend:
– | Removed lines
+ | Added lines
< | Changed lines (old version)
> | Changed lines (new version)