# | Line 1 | Line 1 | |
---|---|---|
1 | < | #include <cstring> |
1 | > | #define _FILE_OFFSET_BITS 64 |
2 | > | |
3 | > | #include <string.h> |
4 | #include <iostream> | |
5 | #include <fstream> | |
6 | + | #include <algorithm> |
7 | + | #include <utility> |
8 | ||
9 | #ifdef IS_MPI | |
10 | #include <mpi.h> | |
11 | #include "mpiSimulation.hpp" | |
12 | < | #define TAKE_THIS_TAG 0 |
12 | > | |
13 | > | namespace dWrite{ |
14 | > | void DieDieDie( void ); |
15 | > | } |
16 | > | |
17 | > | using namespace dWrite; |
18 | #endif //is_mpi | |
19 | ||
20 | #include "ReadWrite.hpp" | |
21 | #include "simError.h" | |
22 | ||
14 | – | |
15 | – | |
16 | – | |
17 | – | |
23 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
24 | ||
25 | entry_plug = the_entry_plug; | |
# | Line 22 | Line 27 | DumpWriter::DumpWriter( SimInfo* the_entry_plug ){ | |
27 | #ifdef IS_MPI | |
28 | if(worldRank == 0 ){ | |
29 | #endif // is_mpi | |
30 | < | |
31 | < | |
32 | < | |
33 | < | strcpy( outName, entry_plug->sampleName ); |
34 | < | |
30 | < | outFile.open(outName, ios::out | ios::trunc ); |
31 | < | |
32 | < | if( !outFile ){ |
33 | < | |
30 | > | |
31 | > | dumpFile.open(entry_plug->sampleName, ios::out | ios::trunc ); |
32 | > | |
33 | > | if( !dumpFile ){ |
34 | > | |
35 | sprintf( painCave.errMsg, | |
36 | "Could not open \"%s\" for dump output.\n", | |
37 | < | outName); |
37 | > | entry_plug->sampleName); |
38 | painCave.isFatal = 1; | |
39 | simError(); | |
40 | } | |
40 | – | |
41 | – | //outFile.setf( ios::scientific ); |
41 | ||
42 | #ifdef IS_MPI | |
43 | } | |
44 | ||
45 | + | //sort the local atoms by global index |
46 | + | sortByGlobalIndex(); |
47 | + | |
48 | sprintf( checkPointMsg, | |
49 | "Sucessfully opened output file for dumping.\n"); | |
50 | MPIcheckPoint(); | |
# | Line 55 | Line 57 | DumpWriter::~DumpWriter( ){ | |
57 | if(worldRank == 0 ){ | |
58 | #endif // is_mpi | |
59 | ||
60 | < | outFile.close(); |
60 | > | dumpFile.close(); |
61 | ||
62 | #ifdef IS_MPI | |
63 | } | |
64 | #endif // is_mpi | |
65 | } | |
66 | ||
67 | < | void DumpWriter::writeDump( double currentTime ){ |
67 | > | #ifdef IS_MPI |
68 | > | |
69 | > | /** |
70 | > | * A hook function for load balancing |
71 | > | */ |
72 | > | |
73 | > | void DumpWriter::update(){ |
74 | > | sortByGlobalIndex(); |
75 | > | } |
76 | ||
77 | < | const int BUFFERSIZE = 2000; |
78 | < | char tempBuffer[BUFFERSIZE]; |
79 | < | char writeLine[BUFFERSIZE]; |
77 | > | /** |
78 | > | * Auxiliary sorting function |
79 | > | */ |
80 | > | |
81 | > | bool indexSortingCriterion(const pair<int, int>& p1, const pair<int, int>& p2){ |
82 | > | return p1.second < p2.second; |
83 | > | } |
84 | ||
85 | < | int i; |
86 | < | double q[4]; |
87 | < | DirectionalAtom* dAtom; |
88 | < | int nAtoms = entry_plug->n_atoms; |
85 | > | /** |
86 | > | * Sort the local atoms by global index |
87 | > | */ |
88 | > | |
89 | > | void DumpWriter::sortByGlobalIndex(){ |
90 | Atom** atoms = entry_plug->atoms; | |
91 | < | |
91 | > | |
92 | > | indexArray.clear(); |
93 | > | |
94 | > | for(int i = 0; i < mpiSim->getMyNlocal();i++) |
95 | > | indexArray.push_back(make_pair(i, atoms[i]->getGlobalIndex())); |
96 | > | |
97 | > | sort(indexArray.begin(), indexArray.end(), indexSortingCriterion); |
98 | ||
99 | < | #ifndef IS_MPI |
99 | > | //for (int i = 0; i < mpiSim->getMyNlocal(); i++) { |
100 | > | // printf("node %d has global %d at local %d\n", worldRank, indexArray[i].second, indexArray[i].first); |
101 | > | //} |
102 | ||
103 | < | outFile << nAtoms << "\n"; |
81 | < | |
82 | < | outFile << currentTime << "\t" |
83 | < | << entry_plug->box_x << "\t" |
84 | < | << entry_plug->box_y << "\t" |
85 | < | << entry_plug->box_z << "\n"; |
86 | < | |
87 | < | for( i=0; i<nAtoms; i++ ){ |
88 | < | |
103 | > | } |
104 | ||
105 | < | sprintf( tempBuffer, |
91 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
92 | < | atoms[i]->getType(), |
93 | < | atoms[i]->getX(), |
94 | < | atoms[i]->getY(), |
95 | < | atoms[i]->getZ(), |
96 | < | atoms[i]->get_vx(), |
97 | < | atoms[i]->get_vy(), |
98 | < | atoms[i]->get_vz()); |
99 | < | strcpy( writeLine, tempBuffer ); |
105 | > | #endif |
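
A minimal, self-contained sketch of the (local index, global index) sort that `sortByGlobalIndex()` performs above; the `globalOf` array stands in for `atoms[i]->getGlobalIndex()` and is purely illustrative:

```cpp
#include <algorithm>
#include <cstdio>
#include <utility>
#include <vector>

using namespace std;

// Same ordering rule as indexSortingCriterion above.
bool byGlobal( const pair<int, int>& p1, const pair<int, int>& p2 ){
  return p1.second < p2.second;
}

int main(){
  int globalOf[4] = { 42, 7, 19, 3 };   // hypothetical global indices
  vector< pair<int, int> > indexArray;  // (local index, global index)

  for( int i = 0; i < 4; i++ )
    indexArray.push_back( make_pair( i, globalOf[i] ) );

  sort( indexArray.begin(), indexArray.end(), byGlobal );

  // Walking indexArray now visits the local atoms in global order,
  // which is exactly what writeFrame's currentIndex cursor relies on.
  for( unsigned int k = 0; k < indexArray.size(); k++ )
    printf( "global %d -> local %d\n",
            indexArray[k].second, indexArray[k].first );
  return 0;
}
```
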
106 | ||
107 | < | if( atoms[i]->isDirectional() ){ |
108 | < | |
109 | < | dAtom = (DirectionalAtom *)atoms[i]; |
110 | < | dAtom->getQ( q ); |
111 | < | |
112 | < | sprintf( tempBuffer, |
113 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
114 | < | q[0], |
115 | < | q[1], |
116 | < | q[2], |
117 | < | q[3], |
118 | < | dAtom->getJx(), |
119 | < | dAtom->getJy(), |
120 | < | dAtom->getJz()); |
121 | < | strcat( writeLine, tempBuffer ); |
107 | > | void DumpWriter::writeDump(double currentTime){ |
108 | > | |
109 | > | ofstream finalOut; |
110 | > | vector<ofstream*> fileStreams; |
111 | > | |
112 | > | #ifdef IS_MPI |
113 | > | if(worldRank == 0 ){ |
114 | > | #endif |
115 | > | finalOut.open( entry_plug->finalName, ios::out | ios::trunc ); |
116 | > | if( !finalOut ){ |
117 | > | sprintf( painCave.errMsg, |
118 | > | "Could not open \"%s\" for final dump output.\n", |
119 | > | entry_plug->finalName ); |
120 | > | painCave.isFatal = 1; |
121 | > | simError(); |
122 | } | |
123 | < | else |
118 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
119 | < | |
120 | < | outFile << writeLine; |
123 | > | #ifdef IS_MPI |
124 | } | |
125 | < | outFile.flush(); |
125 | > | #endif // is_mpi |
126 | ||
127 | < | #else // is_mpi |
127 | > | fileStreams.push_back(&finalOut); |
128 | > | fileStreams.push_back(&dumpFile); |
129 | ||
130 | < | int masterIndex; |
127 | < | int nodeAtomsStart; |
128 | < | int nodeAtomsEnd; |
129 | < | int mpiErr; |
130 | < | int sendError; |
131 | < | int procIndex; |
132 | < | |
133 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
130 | > | writeFrame(fileStreams, currentTime); |
131 | ||
132 | < | |
133 | < | // write out header and node 0's coordinates |
132 | > | #ifdef IS_MPI |
133 | > | finalOut.close(); |
134 | > | #endif |
135 | > | |
136 | > | } |
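
With this change, writeDump() and writeFinal() both funnel through a single writeFrame() that fans one formatted frame out to every registered stream. Note that each writeDump() call reopens entry_plug->finalName truncated, so the final file always holds only the most recent frame, while the dump file accumulates the whole trajectory. A minimal sketch of the fan-out pattern (names illustrative, not from this file):

```cpp
#include <fstream>
#include <vector>

using namespace std;

// Write one already-formatted line to every registered stream, then
// flush them all, mirroring writeFrame's vector<ofstream*> handling.
void fanOut( vector<ofstream*>& streams, const char* line ){
  for( unsigned int k = 0; k < streams.size(); k++ )
    *streams[k] << line;
  for( unsigned int k = 0; k < streams.size(); k++ )
    streams[k]->flush();
}
```
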
137 | ||
138 | < | if( worldRank == 0 ){ |
139 | < | outFile << mpiSim->getTotAtoms() << "\n"; |
140 | < | |
141 | < | outFile << currentTime << "\t" |
142 | < | << entry_plug->box_x << "\t" |
143 | < | << entry_plug->box_y << "\t" |
144 | < | << entry_plug->box_z << "\n"; |
138 | > | void DumpWriter::writeFinal(double currentTime){ |
139 | ||
140 | < | masterIndex = 0; |
141 | < | for( i=0; i<nAtoms; i++ ){ |
148 | < | |
149 | < | sprintf( tempBuffer, |
150 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
151 | < | atoms[i]->getType(), |
152 | < | atoms[i]->getX(), |
153 | < | atoms[i]->getY(), |
154 | < | atoms[i]->getZ(), |
155 | < | atoms[i]->get_vx(), |
156 | < | atoms[i]->get_vy(), |
157 | < | atoms[i]->get_vz()); |
158 | < | strcpy( writeLine, tempBuffer ); |
159 | < | |
160 | < | if( atoms[i]->isDirectional() ){ |
161 | < | |
162 | < | dAtom = (DirectionalAtom *)atoms[i]; |
163 | < | dAtom->getQ( q ); |
164 | < | |
165 | < | sprintf( tempBuffer, |
166 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
167 | < | q[0], |
168 | < | q[1], |
169 | < | q[2], |
170 | < | q[3], |
171 | < | dAtom->getJx(), |
172 | < | dAtom->getJy(), |
173 | < | dAtom->getJz()); |
174 | < | strcat( writeLine, tempBuffer ); |
175 | < | } |
176 | < | else |
177 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
178 | < | |
179 | < | outFile << writeLine; |
180 | < | masterIndex++; |
181 | < | } |
182 | < | outFile.flush(); |
183 | < | } |
140 | > | ofstream finalOut; |
141 | > | vector<ofstream*> fileStreams; |
142 | ||
143 | < | sprintf( checkPointMsg, |
144 | < | "Sucessfully wrote node 0's dump configuration.\n"); |
145 | < | MPIcheckPoint(); |
188 | < | |
189 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
190 | < | procIndex++){ |
143 | > | #ifdef IS_MPI |
144 | > | if(worldRank == 0 ){ |
145 | > | #endif // is_mpi |
146 | ||
147 | < | if( worldRank == 0 ){ |
193 | < | |
194 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
195 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
196 | < | |
197 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
198 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
199 | < | |
200 | < | // Make sure where node 0 is writing to, matches where the |
201 | < | // receiving node expects it to be. |
202 | < | |
203 | < | if (masterIndex != nodeAtomsStart){ |
204 | < | sendError = 1; |
205 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
206 | < | MPI_COMM_WORLD); |
207 | < | sprintf(painCave.errMsg, |
208 | < | "DumpWriter error: atoms start index (%d) for " |
209 | < | "node %d not equal to master index (%d)", |
210 | < | nodeAtomsStart,procIndex,masterIndex ); |
211 | < | painCave.isFatal = 1; |
212 | < | simError(); |
213 | < | } |
214 | < | |
215 | < | sendError = 0; |
216 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
217 | < | MPI_COMM_WORLD); |
218 | < | |
219 | < | // recieve the nodes writeLines |
220 | < | |
221 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
222 | < | |
223 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
224 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
225 | < | |
226 | < | outFile << writeLine; |
227 | < | masterIndex++; |
228 | < | } |
229 | < | } |
147 | > | finalOut.open( entry_plug->finalName, ios::out | ios::trunc ); |
148 | ||
149 | < | else if( worldRank == procIndex ){ |
150 | < | |
151 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
152 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
153 | < | |
154 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
237 | < | MPI_COMM_WORLD); |
238 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
239 | < | MPI_COMM_WORLD); |
240 | < | |
241 | < | sendError = -1; |
242 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
243 | < | MPI_COMM_WORLD, istatus); |
244 | < | |
245 | < | if (sendError) MPIcheckPoint(); |
246 | < | |
247 | < | // send current node's configuration line by line. |
248 | < | |
249 | < | for( i=0; i<nAtoms; i++ ){ |
250 | < | |
251 | < | sprintf( tempBuffer, |
252 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
253 | < | atoms[i]->getType(), |
254 | < | atoms[i]->getX(), |
255 | < | atoms[i]->getY(), |
256 | < | atoms[i]->getZ(), |
257 | < | atoms[i]->get_vx(), |
258 | < | atoms[i]->get_vy(), |
259 | < | atoms[i]->get_vz()); // check here. |
260 | < | strcpy( writeLine, tempBuffer ); |
261 | < | |
262 | < | if( atoms[i]->isDirectional() ){ |
263 | < | |
264 | < | dAtom = (DirectionalAtom *)atoms[i]; |
265 | < | dAtom->getQ( q ); |
266 | < | |
267 | < | sprintf( tempBuffer, |
268 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
269 | < | q[0], |
270 | < | q[1], |
271 | < | q[2], |
272 | < | q[3], |
273 | < | dAtom->getJx(), |
274 | < | dAtom->getJy(), |
275 | < | dAtom->getJz()); |
276 | < | strcat( writeLine, tempBuffer ); |
277 | < | } |
278 | < | else |
279 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
280 | < | |
281 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
282 | < | MPI_COMM_WORLD); |
283 | < | } |
149 | > | if( !finalOut ){ |
150 | > | sprintf( painCave.errMsg, |
151 | > | "Could not open \"%s\" for final dump output.\n", |
152 | > | entry_plug->finalName ); |
153 | > | painCave.isFatal = 1; |
154 | > | simError(); |
155 | } | |
156 | < | |
157 | < | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
287 | < | procIndex); |
288 | < | MPIcheckPoint(); |
156 | > | |
157 | > | #ifdef IS_MPI |
158 | } | |
290 | – | |
159 | #endif // is_mpi | |
160 | + | |
161 | + | fileStreams.push_back(&finalOut); |
162 | + | writeFrame(fileStreams, currentTime); |
163 | + | |
164 | + | #ifdef IS_MPI |
165 | + | finalOut.close(); |
166 | + | #endif |
167 | + | |
168 | } | |
169 | ||
170 | + | void DumpWriter::writeFrame( vector<ofstream*>& outFile, double currentTime ){ |
171 | ||
172 | + | const int BUFFERSIZE = 2000; |
173 | + | const int MINIBUFFERSIZE = 100; |
174 | ||
175 | < | void DumpWriter::writeFinal(){ |
175 | > | char tempBuffer[BUFFERSIZE]; |
176 | > | char writeLine[BUFFERSIZE]; |
177 | ||
178 | + | int i, k; |
179 | ||
180 | < | const int BUFFERSIZE = 2000; |
300 | < | char tempBuffer[500]; |
301 | < | char writeLine[BUFFERSIZE]; |
180 | > | #ifdef IS_MPI |
181 | ||
182 | < | char finalName[500]; |
182 | > | /********************************************************************* |
183 | > | * Documentation? You want DOCUMENTATION? |
184 | > | * |
185 | > | * Why all the potatoes below? |
186 | > | * |
187 | > | * To make a long story short, the original version of DumpWriter |
188 | > | * worked in the most inefficient way possible. Node 0 would |
189 | > | * poke each of the nodes for an individual atom's formatted data |
190 | > | * as node 0 worked its way down the global index. This was particularly |
191 | > | * inefficient since the method blocked all processors at every atom |
192 | > | * (and did it twice!). |
193 | > | * |
194 | > | * An intermediate version of DumpWriter could be described from Node |
195 | > | * zero's perspective as follows: |
196 | > | * |
197 | > | * 1) Have 100 of your friends stand in a circle. |
198 | > | * 2) When you say go, have all of them start tossing potatoes at |
199 | > | * you (one at a time). |
200 | > | * 3) Catch the potatoes. |
201 | > | * |
202 | > | * It was an improvement, but MPI has buffers and caches that could |
203 | > | * best be described in this analogy as "potato nets", so there's no |
204 | > | * need to block the processors atom-by-atom. |
205 | > | * |
206 | > | * This new and improved DumpWriter works in an even more efficient |
207 | > | * way: |
208 | > | * |
209 | > | * 1) Have 100 of your friends stand in a circle. |
210 | > | * 2) When you say go, have them start tossing 5-pound bags of |
211 | > | * potatoes at you. |
212 | > | * 3) Once you've caught a friend's bag of potatoes, |
213 | > | * toss them a spud to let them know they can toss another bag. |
214 | > | * |
215 | > | * How's THAT for documentation? |
216 | > | * |
217 | > | *********************************************************************/ |
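
In concrete terms, the handshake this comment describes looks like the following sender-side sketch (assuming `<mpi.h>`; `sendBatch` and its arguments are hypothetical, though the calls mirror the writeFrame code below): each worker tags its outgoing messages with a private counter, the potato, and wraps that counter back to 0, after blocking on node 0's go-ahead, before it would exceed the MPI tag ceiling.

```cpp
// Hypothetical sender-side sketch of the potato protocol used below;
// myPotato is this worker's tag counter, maxTag plays the role of MAXTAG.
void sendBatch( double* batch, int n, int& myPotato, int maxTag ){

  if( myPotato >= maxTag ){
    // Out of legal tags: block until node 0 tosses back a spud
    // (the go-ahead), then wrap our potato back to 0.
    int goAhead;
    MPI_Status istatus;
    MPI_Recv( &goAhead, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus );
    myPotato = 0;
  }

  // The tag orders the batches, so node 0 can match every receive to
  // the right send without a per-atom handshake.
  MPI_Send( batch, n, MPI_DOUBLE, 0, myPotato, MPI_COMM_WORLD );
  myPotato++;
}
```
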
218 | ||
219 | < | int i; |
219 | > | int *potatoes; |
220 | > | int myPotato; |
221 | > | |
222 | > | int nProc; |
223 | > | int j, which_node, done, which_atom, local_index, currentIndex; |
224 | > | double atomData6[6]; |
225 | > | double atomData13[13]; |
226 | > | int isDirectional; |
227 | > | char* atomTypeString; |
228 | > | char MPIatomTypeString[MINIBUFFERSIZE]; |
229 | > | |
230 | > | #else //is_mpi |
231 | > | int nAtoms = entry_plug->n_atoms; |
232 | > | #endif //is_mpi |
233 | > | |
234 | double q[4]; | |
235 | DirectionalAtom* dAtom; | |
308 | – | int nAtoms = entry_plug->n_atoms; |
236 | Atom** atoms = entry_plug->atoms; | |
237 | + | double pos[3], vel[3]; |
238 | + | |
239 | + | #ifndef IS_MPI |
240 | ||
241 | < | ofstream finalOut; |
242 | < | |
243 | < | #ifdef IS_MPI |
244 | < | if(worldRank == 0 ){ |
245 | < | #endif // is_mpi |
246 | < | |
247 | < | strcpy( finalName, entry_plug->finalName ); |
248 | < | |
249 | < | finalOut.open( finalName, ios::out | ios::trunc ); |
250 | < | if( !finalOut ){ |
251 | < | sprintf( painCave.errMsg, |
252 | < | "Could not open \"%s\" for final dump output.\n", |
253 | < | finalName ); |
254 | < | painCave.isFatal = 1; |
255 | < | simError(); |
256 | < | } |
257 | < | |
258 | < | // finalOut.setf( ios::scientific ); |
329 | < | |
330 | < | #ifdef IS_MPI |
241 | > | for(k = 0; k < outFile.size(); k++){ |
242 | > | *outFile[k] << nAtoms << "\n"; |
243 | > | |
244 | > | *outFile[k] << currentTime << ";\t" |
245 | > | << entry_plug->Hmat[0][0] << "\t" |
246 | > | << entry_plug->Hmat[1][0] << "\t" |
247 | > | << entry_plug->Hmat[2][0] << ";\t" |
248 | > | |
249 | > | << entry_plug->Hmat[0][1] << "\t" |
250 | > | << entry_plug->Hmat[1][1] << "\t" |
251 | > | << entry_plug->Hmat[2][1] << ";\t" |
252 | > | |
253 | > | << entry_plug->Hmat[0][2] << "\t" |
254 | > | << entry_plug->Hmat[1][2] << "\t" |
255 | > | << entry_plug->Hmat[2][2] << ";"; |
256 | > | |
257 | > | //write out additional parameters, such as chi and eta |
258 | > | *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl; |
259 | } | |
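
For reference, the loop above emits, per stream, the atom count on one line and then a semicolon-delimited line holding the time, the three columns of the H-matrix, and the integrator's additional parameters; the per-atom lines that follow carry type, position, velocity, and (for directional atoms) quaternion and angular momentum. Schematically (field names, not real output):

```
nAtoms
time;  H(0,0) H(1,0) H(2,0);  H(0,1) H(1,1) H(2,1);  H(0,2) H(1,2) H(2,2);<additional parameters>
type  x y z  vx vy vz  q0 q1 q2 q3  jx jy jz
```

Non-directional atoms pad the last seven fields with 0.0.
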
260 | ||
261 | < | sprintf(checkPointMsg,"Opened file for final configuration\n"); |
334 | < | MPIcheckPoint(); |
335 | < | |
336 | < | #endif //is_mpi |
261 | > | for( i=0; i<nAtoms; i++ ){ |
262 | ||
263 | < | |
263 | > | atoms[i]->getPos(pos); |
264 | > | atoms[i]->getVel(vel); |
265 | ||
340 | – | #ifndef IS_MPI |
341 | – | |
342 | – | finalOut << nAtoms << "\n"; |
343 | – | |
344 | – | finalOut << entry_plug->box_x << "\t" |
345 | – | << entry_plug->box_y << "\t" |
346 | – | << entry_plug->box_z << "\n"; |
347 | – | |
348 | – | for( i=0; i<nAtoms; i++ ){ |
349 | – | |
266 | sprintf( tempBuffer, | |
267 | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", | |
268 | atoms[i]->getType(), | |
269 | < | atoms[i]->getX(), |
270 | < | atoms[i]->getY(), |
271 | < | atoms[i]->getZ(), |
272 | < | atoms[i]->get_vx(), |
273 | < | atoms[i]->get_vy(), |
274 | < | atoms[i]->get_vz()); |
269 | > | pos[0], |
270 | > | pos[1], |
271 | > | pos[2], |
272 | > | vel[0], |
273 | > | vel[1], |
274 | > | vel[2]); |
275 | strcpy( writeLine, tempBuffer ); | |
276 | ||
277 | if( atoms[i]->isDirectional() ){ | |
278 | < | |
278 | > | |
279 | dAtom = (DirectionalAtom *)atoms[i]; | |
280 | dAtom->getQ( q ); | |
281 | < | |
281 | > | |
282 | sprintf( tempBuffer, | |
283 | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", | |
284 | q[0], | |
# | Line 376 | Line 292 | void DumpWriter::writeFinal(){ | |
292 | } | |
293 | else | |
294 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
295 | < | |
296 | < | finalOut << writeLine; |
295 | > | |
296 | > | for(k = 0; k < outFile.size(); k++) |
297 | > | *outFile[k] << writeLine; |
298 | } | |
382 | – | finalOut.flush(); |
299 | ||
300 | #else // is_mpi | |
301 | ||
302 | < | int masterIndex; |
303 | < | int nodeAtomsStart; |
304 | < | int nodeAtomsEnd; |
305 | < | int mpiErr; |
306 | < | int sendError; |
307 | < | int procIndex; |
308 | < | |
309 | < | MPI_Status istatus[MPI_STATUS_SIZE]; |
302 | > | /* code to find maximum tag value */ |
303 | > | |
304 | > | int *tagub, flag, MAXTAG; |
305 | > | MPI_Attr_get(MPI_COMM_WORLD, MPI_TAG_UB, &tagub, &flag); |
306 | > | if (flag) { |
307 | > | MAXTAG = *tagub; |
308 | > | } else { |
309 | > | MAXTAG = 32767; |
310 | > | } |
311 | ||
312 | < | |
312 | > | int haveError; |
313 | > | |
314 | > | MPI_Status istatus; |
315 | > | int *AtomToProcMap = mpiSim->getAtomToProcMap(); |
316 | > | |
317 | // write out header and node 0's coordinates | |
318 | ||
319 | if( worldRank == 0 ){ | |
320 | < | finalOut << mpiSim->getTotAtoms() << "\n"; |
321 | < | |
322 | < | finalOut << entry_plug->box_x << "\t" |
323 | < | << entry_plug->box_y << "\t" |
324 | < | << entry_plug->box_z << "\n"; |
320 | > | |
321 | > | // Node 0 needs a list of the magic potatoes for each processor; |
322 | > | |
323 | > | nProc = mpiSim->getNumberProcessors(); |
324 | > | potatoes = new int[nProc]; |
325 | > | |
326 | > | // initialize the potato counters for each node |
327 | > | for (i = 0; i < nProc; i++) |
328 | > | potatoes[i] = 0; |
329 | ||
330 | < | masterIndex = 0; |
331 | < | |
332 | < | for( i=0; i<nAtoms; i++ ){ |
330 | > | for(k = 0; k < outFile.size(); k++){ |
331 | > | *outFile[k] << mpiSim->getTotAtoms() << "\n"; |
332 | > | |
333 | > | *outFile[k] << currentTime << ";\t" |
334 | > | << entry_plug->Hmat[0][0] << "\t" |
335 | > | << entry_plug->Hmat[1][0] << "\t" |
336 | > | << entry_plug->Hmat[2][0] << ";\t" |
337 | > | |
338 | > | << entry_plug->Hmat[0][1] << "\t" |
339 | > | << entry_plug->Hmat[1][1] << "\t" |
340 | > | << entry_plug->Hmat[2][1] << ";\t" |
341 | > | |
342 | > | << entry_plug->Hmat[0][2] << "\t" |
343 | > | << entry_plug->Hmat[1][2] << "\t" |
344 | > | << entry_plug->Hmat[2][2] << ";"; |
345 | > | |
346 | > | *outFile[k] << entry_plug->the_integrator->getAdditionalParameters() << endl; |
347 | > | } |
348 | > | |
349 | > | currentIndex = 0; |
350 | > | |
351 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
352 | ||
353 | < | sprintf( tempBuffer, |
354 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
355 | < | atoms[i]->getType(), |
356 | < | atoms[i]->getX(), |
357 | < | atoms[i]->getY(), |
358 | < | atoms[i]->getZ(), |
359 | < | atoms[i]->get_vx(), |
360 | < | atoms[i]->get_vy(), |
361 | < | atoms[i]->get_vz()); |
362 | < | strcpy( writeLine, tempBuffer ); |
353 | > | // Get the Node number which has this atom; |
354 | > | |
355 | > | which_node = AtomToProcMap[i]; |
356 | > | |
357 | > | if (which_node != 0) { |
358 | > | |
359 | > | if (potatoes[which_node] + 3 >= MAXTAG) { |
360 | > | // The potato was going to exceed the maximum value, |
361 | > | // so wrap this processor's potato back to 0: |
362 | > | |
363 | > | potatoes[which_node] = 0; |
364 | > | MPI_Send(0, 1, MPI_INT, which_node, 0, MPI_COMM_WORLD); |
365 | > | |
366 | > | } |
367 | > | |
368 | > | myPotato = potatoes[which_node]; |
369 | > | |
370 | > | MPI_Recv(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, which_node, |
371 | > | myPotato, MPI_COMM_WORLD, &istatus); |
372 | > | |
373 | > | atomTypeString = MPIatomTypeString; |
374 | ||
375 | < | if( atoms[i]->isDirectional() ){ |
375 | > | myPotato++; |
376 | > | |
377 | > | MPI_Recv(&isDirectional, 1, MPI_INT, which_node, |
378 | > | myPotato, MPI_COMM_WORLD, &istatus); |
379 | > | |
380 | > | myPotato++; |
381 | > | |
382 | > | if (isDirectional) { |
383 | > | MPI_Recv(atomData13, 13, MPI_DOUBLE, which_node, |
384 | > | myPotato, MPI_COMM_WORLD, &istatus); |
385 | > | } else { |
386 | > | MPI_Recv(atomData6, 6, MPI_DOUBLE, which_node, |
387 | > | myPotato, MPI_COMM_WORLD, &istatus); |
388 | > | } |
389 | > | |
390 | > | myPotato++; |
391 | > | potatoes[which_node] = myPotato; |
392 | > | |
393 | > | } else { |
394 | > | |
395 | > | haveError = 0; |
396 | > | which_atom = i; |
397 | > | |
398 | > | //local_index = -1; |
399 | > | |
400 | > | //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
401 | > | // if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
402 | > | //} |
403 | > | |
404 | > | //if (local_index != -1) { |
405 | ||
406 | < | dAtom = (DirectionalAtom *)atoms[i]; |
407 | < | dAtom->getQ( q ); |
406 | > | local_index = indexArray[currentIndex].first; |
407 | > | |
408 | > | if (which_atom == indexArray[currentIndex].second) { |
409 | > | |
410 | > | atomTypeString = atoms[local_index]->getType(); |
411 | > | |
412 | > | atoms[local_index]->getPos(pos); |
413 | > | atoms[local_index]->getVel(vel); |
414 | ||
415 | < | sprintf( tempBuffer, |
416 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
417 | < | q[0], |
418 | < | q[1], |
419 | < | q[2], |
420 | < | q[3], |
421 | < | dAtom->getJx(), |
422 | < | dAtom->getJy(), |
423 | < | dAtom->getJz()); |
424 | < | strcat( writeLine, tempBuffer ); |
415 | > | atomData6[0] = pos[0]; |
416 | > | atomData6[1] = pos[1]; |
417 | > | atomData6[2] = pos[2]; |
418 | > | |
419 | > | atomData6[3] = vel[0]; |
420 | > | atomData6[4] = vel[1]; |
421 | > | atomData6[5] = vel[2]; |
422 | > | |
423 | > | isDirectional = 0; |
424 | > | |
425 | > | if( atoms[local_index]->isDirectional() ){ |
426 | > | |
427 | > | isDirectional = 1; |
428 | > | |
429 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
430 | > | dAtom->getQ( q ); |
431 | > | |
432 | > | for (int j = 0; j < 6 ; j++) |
433 | > | atomData13[j] = atomData6[j]; |
434 | > | |
435 | > | atomData13[6] = q[0]; |
436 | > | atomData13[7] = q[1]; |
437 | > | atomData13[8] = q[2]; |
438 | > | atomData13[9] = q[3]; |
439 | > | |
440 | > | atomData13[10] = dAtom->getJx(); |
441 | > | atomData13[11] = dAtom->getJy(); |
442 | > | atomData13[12] = dAtom->getJz(); |
443 | > | } |
444 | > | |
445 | > | } else { |
446 | > | sprintf(painCave.errMsg, |
447 | > | "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n", |
448 | > | which_atom, worldRank, currentIndex, local_index ); |
449 | > | haveError = 1; |
450 | > | simError(); |
451 | > | } |
452 | > | |
453 | > | if(haveError) DieDieDie(); |
454 | > | |
455 | > | currentIndex++; |
456 | } | |
457 | < | else |
457 | > | // If we've survived to here, format the line: |
458 | > | |
459 | > | if (!isDirectional) { |
460 | > | |
461 | > | sprintf( writeLine, |
462 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
463 | > | atomTypeString, |
464 | > | atomData6[0], |
465 | > | atomData6[1], |
466 | > | atomData6[2], |
467 | > | atomData6[3], |
468 | > | atomData6[4], |
469 | > | atomData6[5]); |
470 | > | |
471 | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); | |
472 | ||
473 | < | finalOut << writeLine; |
474 | < | masterIndex++; |
473 | > | } else { |
474 | > | |
475 | > | sprintf( writeLine, |
476 | > | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
477 | > | atomTypeString, |
478 | > | atomData13[0], |
479 | > | atomData13[1], |
480 | > | atomData13[2], |
481 | > | atomData13[3], |
482 | > | atomData13[4], |
483 | > | atomData13[5], |
484 | > | atomData13[6], |
485 | > | atomData13[7], |
486 | > | atomData13[8], |
487 | > | atomData13[9], |
488 | > | atomData13[10], |
489 | > | atomData13[11], |
490 | > | atomData13[12]); |
491 | > | |
492 | > | } |
493 | > | |
494 | > | for(k = 0; k < outFile.size(); k++) |
495 | > | *outFile[k] << writeLine; |
496 | } | |
442 | – | finalOut.flush(); |
443 | – | } |
497 | ||
498 | < | for (procIndex = 1; procIndex < mpiSim->getNumberProcessors(); |
499 | < | procIndex++){ |
498 | > | for(k = 0; k < outFile.size(); k++) |
499 | > | outFile[k]->flush(); |
500 | > | |
501 | > | sprintf( checkPointMsg, |
502 | > | "Sucessfully took a dump.\n"); |
503 | > | |
504 | > | MPIcheckPoint(); |
505 | > | |
506 | > | delete[] potatoes; |
507 | > | |
508 | > | } else { |
509 | ||
510 | < | if( worldRank == 0 ){ |
449 | < | |
450 | < | mpiErr = MPI_Recv(&nodeAtomsStart,1,MPI_INT,procIndex, |
451 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus); |
510 | > | // worldRank != 0, so I'm a remote node. |
511 | ||
512 | < | mpiErr = MPI_Recv(&nodeAtomsEnd,1,MPI_INT,procIndex, |
454 | < | TAKE_THIS_TAG,MPI_COMM_WORLD, istatus); |
455 | < | |
456 | < | // Make sure where node 0 is writing to, matches where the |
457 | < | // receiving node expects it to be. |
458 | < | |
459 | < | if (masterIndex != nodeAtomsStart){ |
460 | < | sendError = 1; |
461 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
462 | < | MPI_COMM_WORLD); |
463 | < | sprintf(painCave.errMsg, |
464 | < | "DumpWriter error: atoms start index (%d) for " |
465 | < | "node %d not equal to master index (%d)", |
466 | < | nodeAtomsStart,procIndex,masterIndex ); |
467 | < | painCave.isFatal = 1; |
468 | < | simError(); |
469 | < | } |
470 | < | |
471 | < | sendError = 0; |
472 | < | mpiErr = MPI_Send(&sendError,1,MPI_INT,procIndex,TAKE_THIS_TAG, |
473 | < | MPI_COMM_WORLD); |
512 | > | // Set my magic potato to 0: |
513 | ||
514 | < | // recieve the nodes writeLines |
514 | > | myPotato = 0; |
515 | > | currentIndex = 0; |
516 | > | |
517 | > | for (i = 0 ; i < mpiSim->getTotAtoms(); i++ ) { |
518 | > | |
519 | > | // Am I the node which has this atom? |
520 | > | |
521 | > | if (AtomToProcMap[i] == worldRank) { |
522 | ||
523 | < | for ( i = nodeAtomsStart; i <= nodeAtomsEnd; i++){ |
523 | > | if (myPotato + 3 >= MAXTAG) { |
524 | ||
525 | < | mpiErr = MPI_Recv(writeLine,BUFFERSIZE,MPI_CHAR,procIndex, |
526 | < | TAKE_THIS_TAG,MPI_COMM_WORLD,istatus ); |
525 | > | // The potato was going to exceed the maximum value, |
526 | > | // so wrap this processor's potato back to 0 (and block until |
527 | > | // node 0 says we can go): |
528 | > | |
529 | > | MPI_Recv(&myPotato, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &istatus); |
530 | > | |
531 | > | } |
532 | > | which_atom = i; |
533 | ||
534 | < | finalOut << writeLine; |
483 | < | masterIndex++; |
484 | < | } |
534 | > | //local_index = -1; |
535 | ||
536 | < | finalOut.flush(); |
537 | < | } |
536 | > | //for (j=0; (j<mpiSim->getMyNlocal()) && (local_index < 0); j++) { |
537 | > | // if (atoms[j]->getGlobalIndex() == which_atom) local_index = j; |
538 | > | //} |
539 | > | |
540 | > | //if (local_index != -1) { |
541 | ||
542 | < | else if( worldRank == procIndex ){ |
542 | > | local_index = indexArray[currentIndex].first; |
543 | > | |
544 | > | if (which_atom == indexArray[currentIndex].second) { |
545 | > | |
546 | > | atomTypeString = atoms[local_index]->getType(); |
547 | > | |
548 | > | atoms[local_index]->getPos(pos); |
549 | > | atoms[local_index]->getVel(vel); |
550 | > | |
551 | > | atomData6[0] = pos[0]; |
552 | > | atomData6[1] = pos[1]; |
553 | > | atomData6[2] = pos[2]; |
554 | ||
555 | < | nodeAtomsStart = mpiSim->getMyAtomStart(); |
556 | < | nodeAtomsEnd = mpiSim->getMyAtomEnd(); |
557 | < | |
558 | < | mpiErr = MPI_Send(&nodeAtomsStart,1,MPI_INT,0,TAKE_THIS_TAG, |
559 | < | MPI_COMM_WORLD); |
496 | < | mpiErr = MPI_Send(&nodeAtomsEnd,1,MPI_INT,0,TAKE_THIS_TAG, |
497 | < | MPI_COMM_WORLD); |
498 | < | |
499 | < | mpiErr = MPI_Recv(&sendError,1,MPI_INT,0,TAKE_THIS_TAG, |
500 | < | MPI_COMM_WORLD, istatus); |
501 | < | if (sendError) MPIcheckPoint(); |
555 | > | atomData6[3] = vel[0]; |
556 | > | atomData6[4] = vel[1]; |
557 | > | atomData6[5] = vel[2]; |
558 | > | |
559 | > | isDirectional = 0; |
560 | ||
561 | < | // send current node's configuration line by line. |
561 | > | if( atoms[local_index]->isDirectional() ){ |
562 | ||
563 | < | for( i=0; i<nAtoms; i++ ){ |
564 | < | |
565 | < | sprintf( tempBuffer, |
566 | < | "%s\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t", |
567 | < | atoms[i]->getType(), |
568 | < | atoms[i]->getX(), |
569 | < | atoms[i]->getY(), |
570 | < | atoms[i]->getZ(), |
571 | < | atoms[i]->get_vx(), |
572 | < | atoms[i]->get_vy(), |
573 | < | atoms[i]->get_vz()); |
574 | < | strcpy( writeLine, tempBuffer ); |
575 | < | |
576 | < | if( atoms[i]->isDirectional() ){ |
577 | < | |
578 | < | dAtom = (DirectionalAtom *)atoms[i]; |
579 | < | dAtom->getQ( q ); |
580 | < | |
581 | < | sprintf( tempBuffer, |
582 | < | "%lf\t%lf\t%lf\t%lf\t%lf\t%lf\t%lf\n", |
583 | < | q[0], |
584 | < | q[1], |
585 | < | q[2], |
586 | < | q[3], |
529 | < | dAtom->getJx(), |
530 | < | dAtom->getJy(), |
531 | < | dAtom->getJz()); |
532 | < | strcat( writeLine, tempBuffer ); |
563 | > | isDirectional = 1; |
564 | > | |
565 | > | dAtom = (DirectionalAtom *)atoms[local_index]; |
566 | > | dAtom->getQ( q ); |
567 | > | |
568 | > | for (int j = 0; j < 6 ; j++) |
569 | > | atomData13[j] = atomData6[j]; |
570 | > | |
571 | > | atomData13[6] = q[0]; |
572 | > | atomData13[7] = q[1]; |
573 | > | atomData13[8] = q[2]; |
574 | > | atomData13[9] = q[3]; |
575 | > | |
576 | > | atomData13[10] = dAtom->getJx(); |
577 | > | atomData13[11] = dAtom->getJy(); |
578 | > | atomData13[12] = dAtom->getJz(); |
579 | > | } |
580 | > | |
581 | > | } else { |
582 | > | sprintf(painCave.errMsg, |
583 | > | "Atom %d not found on processor %d, currentIndex = %d, local_index = %d\n", |
584 | > | which_atom, worldRank, currentIndex, local_index ); |
585 | > | haveError = 1; |
586 | > | simError(); |
587 | } | |
588 | < | else |
589 | < | strcat( writeLine, "0.0\t0.0\t0.0\t0.0\t0.0\t0.0\t0.0\n" ); |
590 | < | |
591 | < | mpiErr = MPI_Send(writeLine,BUFFERSIZE,MPI_CHAR,0,TAKE_THIS_TAG, |
592 | < | MPI_COMM_WORLD); |
588 | > | |
589 | > | strncpy(MPIatomTypeString, atomTypeString, MINIBUFFERSIZE); |
590 | > | |
591 | > | // null terminate the string before sending (just in case): |
592 | > | MPIatomTypeString[MINIBUFFERSIZE-1] = '\0'; |
593 | > | |
594 | > | MPI_Send(MPIatomTypeString, MINIBUFFERSIZE, MPI_CHAR, 0, |
595 | > | myPotato, MPI_COMM_WORLD); |
596 | > | |
597 | > | myPotato++; |
598 | > | |
599 | > | MPI_Send(&isDirectional, 1, MPI_INT, 0, |
600 | > | myPotato, MPI_COMM_WORLD); |
601 | > | |
602 | > | myPotato++; |
603 | > | |
604 | > | if (isDirectional) { |
605 | > | |
606 | > | MPI_Send(atomData13, 13, MPI_DOUBLE, 0, |
607 | > | myPotato, MPI_COMM_WORLD); |
608 | > | |
609 | > | } else { |
610 | > | |
611 | > | MPI_Send(atomData6, 6, MPI_DOUBLE, 0, |
612 | > | myPotato, MPI_COMM_WORLD); |
613 | > | } |
614 | > | |
615 | > | myPotato++; |
616 | > | currentIndex++; |
617 | } | |
618 | } | |
541 | – | |
542 | – | sprintf(checkPointMsg,"Node %d sent dump configuration.", |
543 | – | procIndex); |
544 | – | MPIcheckPoint(); |
545 | – | } |
619 | ||
620 | < | if( worldRank == 0 ) finalOut.close(); |
621 | < | |
620 | > | sprintf( checkPointMsg, |
621 | > | "Sucessfully took a dump.\n"); |
622 | > | MPIcheckPoint(); |
623 | ||
624 | + | } |
625 | + | |
626 | #endif // is_mpi | |
627 | } | |
628 | + | |
629 | + | #ifdef IS_MPI |
630 | + | |
631 | + | // a function to let us escape the write loop |
632 | + | |
633 | + | void dWrite::DieDieDie( void ){ |
634 | + | |
635 | + | MPI_Finalize(); |
636 | + | exit (0); |
637 | + | } |
638 | + | |
639 | + | #endif //is_mpi |
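
Finally, a hypothetical driver sketching how the writer is meant to be called; the SimInfo wiring and the integrator loop are elided, and `runAndDump`/`sampleEvery` are illustrative names, not part of this file:

```cpp
// Hypothetical usage sketch; SimInfo setup and the MD loop are elided.
void runAndDump( SimInfo* info, double dt, int nSteps, int sampleEvery ){

  DumpWriter writer( info );              // opens info->sampleName

  for( int step = 1; step <= nSteps; step++ ){
    // ... advance the integrator by dt; under MPI, call
    // writer.update() after any atom migration so the sorted
    // index map stays valid ...
    if( step % sampleEvery == 0 )
      writer.writeDump( step * dt );      // appends one frame
  }

  writer.writeFinal( nSteps * dt );       // rewrites info->finalName
}
```
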
– | Removed lines |
+ | Added lines |
< | Changed lines (original version)
> | Changed lines (revised version)