# | Line 1 | Line 1 |
---|---|---|
1 | #ifdef IS_MPI | |
2 | < | |
2 | > | #include <iostream> |
3 | #include <cstdlib> | |
4 | #include <cstring> | |
5 | + | #include <cmath> |
6 | #include <mpi.h> | |
7 | #include <mpi++.h> | |
8 | ||
9 | #include "mpiSimulation.hpp" | |
10 | #include "simError.h" | |
11 | #include "fortranWrappers.hpp" | |
12 | + | #include "randomSPRNG.hpp" |
13 | ||
14 | + | #define BASE_SEED 123456789 |
15 | ||
13 | – | |
14 | – | |
16 | mpiSimulation* mpiSim; | |
17 | ||
18 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) | |
# | Line 21 | Line 22 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) |
22 | ||
23 | mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size(); | |
24 | mpiPlug->myNode = worldRank; | |
25 | < | |
25 | > | |
26 | > | MolToProcMap = new int[entryPlug->n_mol]; |
27 | > | MolComponentType = new int[entryPlug->n_mol]; |
28 | > | AtomToProcMap = new int[entryPlug->n_atoms]; |
29 | > | |
30 | mpiSim = this; | |
31 | wrapMeSimParallel( this ); | |
32 | } | |
# | Line 29 | Line 34 | mpiSimulation::~mpiSimulation(){ |
34 | ||
35 | mpiSimulation::~mpiSimulation(){ | |
36 | ||
37 | + | delete[] MolToProcMap; |
38 | + | delete[] MolComponentType; |
39 | + | delete[] AtomToProcMap; |
40 | + | |
41 | delete mpiPlug; | |
42 | // perhaps we should let fortran know the party is over. | |
43 | ||
44 | } | |
45 | ||
37 | – | |
38 | – | |
46 | int* mpiSimulation::divideLabor( void ){ | |
47 | ||
48 | int* globalIndex; | |
49 | ||
50 | int nComponents; | |
51 | MoleculeStamp** compStamps; | |
52 | + | randomSPRNG *myRandom; |
53 | int* componentsNmol; | |
54 | + | int* AtomsPerProc; |
55 | ||
56 | double numerator; | |
57 | double denominator; | |
58 | double precast; | |
59 | + | double x, y, a; |
60 | + | int old_atoms, add_atoms, new_atoms; |
61 | ||
62 | int nTarget; | |
63 | int molIndex, atomIndex, compIndex, compStart; | |
64 | int done; | |
65 | int nLocal, molLocal; | |
66 | < | int i, index; |
66 | > | int i, j, loops, which_proc, nmol_local, natoms_local; |
67 | > | int nmol_global, natoms_global; |
68 | > | int local_index, index; |
69 | int smallDiff, bigDiff; | |
70 | + | int baseSeed = BASE_SEED; |
71 | ||
72 | int testSum; | |
73 | ||
74 | nComponents = entryPlug->nComponents; | |
75 | compStamps = entryPlug->compStamps; | |
76 | componentsNmol = entryPlug->componentsNmol; | |
77 | < | |
77 | > | AtomsPerProc = new int[mpiPlug->numberProcessors]; |
78 | > | |
79 | mpiPlug->nAtomsGlobal = entryPlug->n_atoms; | |
80 | mpiPlug->nBondsGlobal = entryPlug->n_bonds; | |
81 | mpiPlug->nBendsGlobal = entryPlug->n_bends; | |
# | Line 68 | Line 83 | int* mpiSimulation::divideLabor( void ){ |
83 | mpiPlug->nSRIGlobal = entryPlug->n_SRI; | |
84 | mpiPlug->nMolGlobal = entryPlug->n_mol; | |
85 | ||
86 | + | myRandom = new randomSPRNG( baseSeed ); |
87 | ||
88 | + | a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal; |
89 | ||
90 | + | // Initialize things that we'll send out later: |
91 | + | for (i = 0; i < mpiPlug->numberProcessors; i++ ) { |
92 | + | AtomsPerProc[i] = 0; |
93 | + | } |
94 | + | for (i = 0; i < mpiPlug->nMolGlobal; i++ ) { |
95 | + | // default to an error condition: |
96 | + | MolToProcMap[i] = -1; |
97 | + | MolComponentType[i] = -1; |
98 | + | } |
99 | + | for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) { |
100 | + | // default to an error condition: |
101 | + | AtomToProcMap[i] = -1; |
102 | + | } |
103 | + | |
104 | + | if (mpiPlug->myNode == 0) { |
105 | + | numerator = (double) entryPlug->n_atoms; |
106 | + | denominator = (double) mpiPlug->numberProcessors; |
107 | + | precast = numerator / denominator; |
108 | + | nTarget = (int)( precast + 0.5 ); |
109 | ||
110 | + | // Build the array of molecule component types first |
111 | + | molIndex = 0; |
112 | + | for (i=0; i < nComponents; i++) { |
113 | + | for (j=0; j < componentsNmol[i]; j++) { |
114 | + | MolComponentType[molIndex] = i; |
115 | + | molIndex++; |
116 | + | } |
117 | + | } |
118 | ||
119 | + | atomIndex = 0; |
120 | ||
121 | + | for (i = 0; i < molIndex; i++ ) { |
122 | ||
123 | + | done = 0; |
124 | + | loops = 0; |
125 | ||
126 | + | while( !done ){ |
127 | + | loops++; |
128 | + | |
129 | + | // Pick a processor at random |
130 | ||
131 | + | which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors); |
132 | ||
133 | + | // How many atoms does this processor have? |
134 | + | |
135 | + | old_atoms = AtomsPerProc[which_proc]; |
136 | + | add_atoms = compStamps[MolComponentType[i]]->getNAtoms(); |
137 | + | new_atoms = old_atoms + add_atoms; |
138 | ||
139 | + | // If the processor already had too many atoms, just skip this |
140 | + | // processor and try again. |
141 | ||
142 | < | numerator = (double) entryPlug->n_atoms; |
143 | < | denominator = (double) mpiPlug->numberProcessors; |
144 | < | precast = numerator / denominator; |
145 | < | nTarget = (int)( precast + 0.5 ); |
146 | < | |
147 | < | molIndex = 0; |
148 | < | atomIndex = 0; |
149 | < | compIndex = 0; |
150 | < | compStart = 0; |
151 | < | for( i=0; i<(mpiPlug->numberProcessors-1); i++){ |
152 | < | |
153 | < | done = 0; |
154 | < | nLocal = 0; |
155 | < | molLocal = 0; |
142 | > | // If we've been through this loop too many times, we need |
143 | > | // to just give up and assign the molecule to this processor |
144 | > | // and be done with it. |
145 | > | |
146 | > | if (loops > 100) { |
147 | > | sprintf( painCave.errMsg, |
148 | > | "I've tried 100 times to assign molecule %d to a " |
149 | > | " processor, but can't find a good spot.\n" |
150 | > | "I'm assigning it at random to processor %d.\n", |
151 | > | i, which_proc); |
152 | > | painCave.isFatal = 0; |
153 | > | simError(); |
154 | > | |
155 | > | MolToProcMap[i] = which_proc; |
156 | > | AtomsPerProc[which_proc] += add_atoms; |
157 | > | for (j = 0 ; j < add_atoms; j++ ) { |
158 | > | AtomToProcMap[atomIndex] = which_proc; |
159 | > | atomIndex++; |
160 | > | } |
161 | > | done = 1; |
162 | > | continue; |
163 | > | } |
164 | ||
165 | < | if( i == mpiPlug->myNode ){ |
98 | < | mpiPlug->myMolStart = molIndex; |
99 | < | mpiPlug->myAtomStart = atomIndex; |
100 | < | } |
165 | > | if (old_atoms >= nTarget) continue; |
166 | ||
167 | < | while( !done ){ |
168 | < | |
169 | < | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
170 | < | compStart = molIndex; |
171 | < | compIndex++; |
172 | < | continue; |
173 | < | } |
167 | > | // If we can add this molecule to this processor without sending |
168 | > | // it above nTarget, then go ahead and do it: |
169 | > | |
170 | > | if (new_atoms <= nTarget) { |
171 | > | MolToProcMap[i] = which_proc; |
172 | > | AtomsPerProc[which_proc] += add_atoms; |
173 | > | for (j = 0 ; j < add_atoms; j++ ) { |
174 | > | AtomToProcMap[atomIndex] = which_proc; |
175 | > | atomIndex++; |
176 | > | } |
177 | > | done = 1; |
178 | > | continue; |
179 | > | } |
180 | ||
181 | < | nLocal += compStamps[compIndex]->getNAtoms(); |
182 | < | atomIndex += compStamps[compIndex]->getNAtoms(); |
183 | < | molIndex++; |
184 | < | molLocal++; |
185 | < | |
186 | < | if ( nLocal == nTarget ) done = 1; |
187 | < | |
188 | < | else if( nLocal < nTarget ){ |
189 | < | smallDiff = nTarget - nLocal; |
190 | < | } |
191 | < | else if( nLocal > nTarget ){ |
192 | < | bigDiff = nLocal - nTarget; |
193 | < | |
194 | < | if( bigDiff < smallDiff ) done = 1; |
195 | < | else{ |
196 | < | molIndex--; |
197 | < | molLocal--; |
198 | < | atomIndex -= compStamps[compIndex]->getNAtoms(); |
199 | < | nLocal -= compStamps[compIndex]->getNAtoms(); |
200 | < | done = 1; |
201 | < | } |
181 | > | |
182 | > | // The only situation left is where old_atoms < nTarget, but |
183 | > | // new_atoms > nTarget. We want to accept this with some |
184 | > | // probability that dies off the farther we are from nTarget |
185 | > | |
186 | > | // roughly: x = new_atoms - nTarget |
187 | > | // Pacc(x) = exp(- a * x) |
188 | > | // where a = 1 / (average atoms per molecule) |
189 | > | |
190 | > | x = (double) (new_atoms - nTarget); |
191 | > | y = myRandom->getRandom(); |
192 | > | |
193 | > | if (exp(- a * x) > y) { |
194 | > | MolToProcMap[i] = which_proc; |
195 | > | AtomsPerProc[which_proc] += add_atoms; |
196 | > | for (j = 0 ; j < add_atoms; j++ ) { |
197 | > | AtomToProcMap[atomIndex] = which_proc; |
198 | > | atomIndex++; |
199 | > | } |
200 | > | done = 1; |
201 | > | continue; |
202 | > | } else { |
203 | > | continue; |
204 | > | } |
205 | > | |
206 | } | |
207 | } | |
208 | + | |
209 | + | // Spray out this nonsense to all other processors: |
210 | + | |
211 | + | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
212 | + | MPI_INT, 0); |
213 | + | |
214 | + | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
215 | + | MPI_INT, 0); |
216 | + | |
217 | + | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
218 | + | MPI_INT, 0); |
219 | + | |
220 | + | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
221 | + | MPI_INT, 0); |
222 | + | } else { |
223 | + | |
224 | + | // Listen to your marching orders from processor 0: |
225 | ||
226 | < | if( i == mpiPlug->myNode ){ |
227 | < | mpiPlug->myMolEnd = (molIndex - 1); |
136 | < | mpiPlug->myAtomEnd = (atomIndex - 1); |
137 | < | mpiPlug->myNlocal = nLocal; |
138 | < | mpiPlug->myMol = molLocal; |
139 | < | } |
226 | > | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
227 | > | MPI_INT, 0); |
228 | ||
229 | < | numerator = (double)( entryPlug->n_atoms - atomIndex ); |
230 | < | denominator = (double)( mpiPlug->numberProcessors - (i+1) ); |
231 | < | precast = numerator / denominator; |
232 | < | nTarget = (int)( precast + 0.5 ); |
229 | > | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
230 | > | MPI_INT, 0); |
231 | > | |
232 | > | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
233 | > | MPI_INT, 0); |
234 | > | |
235 | > | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
236 | > | MPI_INT, 0); |
237 | > | |
238 | > | |
239 | } | |
146 | – | |
147 | – | if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){ |
148 | – | mpiPlug->myMolStart = molIndex; |
149 | – | mpiPlug->myAtomStart = atomIndex; |
240 | ||
151 | – | nLocal = 0; |
152 | – | molLocal = 0; |
153 | – | while( compIndex < nComponents ){ |
154 | – | |
155 | – | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
156 | – | compStart = molIndex; |
157 | – | compIndex++; |
158 | – | continue; |
159 | – | } |
241 | ||
242 | < | nLocal += compStamps[compIndex]->getNAtoms(); |
243 | < | atomIndex += compStamps[compIndex]->getNAtoms(); |
244 | < | molIndex++; |
245 | < | molLocal++; |
246 | < | } |
247 | < | |
248 | < | mpiPlug->myMolEnd = (molIndex - 1); |
168 | < | mpiPlug->myAtomEnd = (atomIndex - 1); |
169 | < | mpiPlug->myNlocal = nLocal; |
170 | < | mpiPlug->myMol = molLocal; |
242 | > | // Let's all check for sanity: |
243 | > | |
244 | > | nmol_local = 0; |
245 | > | for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) { |
246 | > | if (MolToProcMap[i] == mpiPlug->myNode) { |
247 | > | nmol_local++; |
248 | > | } |
249 | } | |
250 | ||
251 | + | natoms_local = 0; |
252 | + | for (i = 0; i < mpiPlug->nAtomsGlobal; i++) { |
253 | + | if (AtomToProcMap[i] == mpiPlug->myNode) { |
254 | + | natoms_local++; |
255 | + | } |
256 | + | } |
257 | ||
258 | < | MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); |
258 | > | std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n"; |
259 | > | |
260 | > | MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM); |
261 | > | MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM); |
262 | ||
263 | < | if( mpiPlug->myNode == 0 ){ |
264 | < | if( testSum != entryPlug->n_atoms ){ |
265 | < | sprintf( painCave.errMsg, |
266 | < | "The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n", |
267 | < | testSum, entryPlug->n_atoms ); |
268 | < | painCave.isFatal = 1; |
269 | < | simError(); |
183 | < | } |
263 | > | if( nmol_global != entryPlug->n_mol ){ |
264 | > | sprintf( painCave.errMsg, |
265 | > | "The sum of all nmol_local, %d, did not equal the " |
266 | > | "total number of molecules, %d.\n", |
267 | > | nmol_global, entryPlug->n_mol ); |
268 | > | painCave.isFatal = 1; |
269 | > | simError(); |
270 | } | |
271 | + | |
272 | + | if( natoms_global != entryPlug->n_atoms ){ |
273 | + | sprintf( painCave.errMsg, |
274 | + | "The sum of all natoms_local, %d, did not equal the " |
275 | + | "total number of atoms, %d.\n", |
276 | + | natoms_global, entryPlug->n_atoms ); |
277 | + | painCave.isFatal = 1; |
278 | + | simError(); |
279 | + | } |
280 | ||
281 | sprintf( checkPointMsg, | |
282 | "Successfully divided the molecules among the processors.\n" ); | |
283 | MPIcheckPoint(); | |
284 | ||
285 | < | // lets create the identity array |
285 | > | mpiPlug->myNMol = nmol_local; |
286 | > | mpiPlug->myNlocal = natoms_local; |
287 | ||
288 | globalIndex = new int[mpiPlug->myNlocal]; | |
289 | < | index = mpiPlug->myAtomStart; |
290 | < | for( i=0; i<mpiPlug->myNlocal; i++){ |
291 | < | globalIndex[i] = index; |
292 | < | index++; |
289 | > | local_index = 0; |
290 | > | for (i = 0; i < mpiPlug->nAtomsGlobal; i++) { |
291 | > | if (AtomToProcMap[i] == mpiPlug->myNode) { |
292 | > | globalIndex[local_index] = i; |
293 | > | local_index++; |
294 | > | } |
295 | } | |
296 | < | |
296 | > | |
297 | return globalIndex; | |
298 | } | |
299 |
– | Removed lines |
+ | Added lines |
< | Changed lines (old revision)
> | Changed lines (new revision)
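
The comment block in the new `divideLabor()` (roughly: `x = new_atoms - nTarget`, `Pacc(x) = exp(-a * x)`, with `a = 1 / (average atoms per molecule)`) is the heart of the change. The sketch below is a minimal, self-contained illustration of that acceptance scheme, not the committed code: `assignMolecules`, `atomsPerMol`, and the `std::mt19937` generator are illustrative stand-ins (the real routine walks `compStamps`/`componentsNmol` and draws its random numbers from the SPRNG-backed `randomSPRNG` class), and the toy inputs in `main` are invented for the example.

```cpp
#include <cmath>
#include <cstddef>
#include <cstdio>
#include <random>
#include <vector>

// Assign each molecule (known here only by its atom count) to one of
// nProcs processors, aiming for roughly equal atoms per processor.
// Returns a molecule -> processor map.
std::vector<int> assignMolecules(const std::vector<int>& atomsPerMol,
                                 int nProcs, unsigned seed) {
  std::mt19937 rng(seed);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  int totalAtoms = 0;
  for (std::size_t i = 0; i < atomsPerMol.size(); i++)
    totalAtoms += atomsPerMol[i];

  // Target atom count per processor, and the decay constant
  // a = 1 / (average atoms per molecule) used in the acceptance test.
  const int nTarget = (int)((double)totalAtoms / (double)nProcs + 0.5);
  const double a = (double)atomsPerMol.size() / (double)totalAtoms;

  std::vector<int> molToProc(atomsPerMol.size(), -1);
  std::vector<int> atomsPerProc(nProcs, 0);

  for (std::size_t i = 0; i < atomsPerMol.size(); i++) {
    const int addAtoms = atomsPerMol[i];

    for (int loops = 1; ; loops++) {
      // Pick a processor at random.
      const int proc = (int)(uniform(rng) * nProcs);
      const int oldAtoms = atomsPerProc[proc];
      const int newAtoms = oldAtoms + addAtoms;

      // After 100 failed attempts, take whatever processor came up last.
      const bool forced = (loops > 100);

      // Accept outright if the processor stays at or below the target.
      bool accept = forced || (newAtoms <= nTarget);

      // A processor still under target may be pushed over it, with
      // probability exp(-a * overshoot): small overshoots are likely,
      // large ones exponentially rare.
      if (!accept && oldAtoms < nTarget) {
        const double x = (double)(newAtoms - nTarget);
        accept = (std::exp(-a * x) > uniform(rng));
      }

      if (accept) {
        molToProc[i] = proc;
        atomsPerProc[proc] += addAtoms;
        break;
      }
    }
  }
  return molToProc;
}

int main() {
  // Toy system: 12 molecules of mixed sizes spread over 4 processors.
  std::vector<int> atomsPerMol = {3, 3, 3, 5, 5, 5, 1, 1, 1, 8, 8, 8};
  const int nProcs = 4;

  std::vector<int> molToProc = assignMolecules(atomsPerMol, nProcs, 123456789u);

  std::vector<int> atomsPerProc(nProcs, 0);
  for (std::size_t i = 0; i < molToProc.size(); i++)
    atomsPerProc[molToProc[i]] += atomsPerMol[i];

  for (int p = 0; p < nProcs; p++)
    std::printf("proc %d: %d atoms\n", p, atomsPerProc[p]);
  return 0;
}
```

The exponential acceptance lets a molecule push a processor slightly past `nTarget`, which is unavoidable when molecule sizes do not divide evenly into the target, while making large imbalances very unlikely; the 100-attempt cap (forced acceptance) guarantees the loop terminates even when every processor is already at or above target, which is why the committed code only issues a non-fatal `simError()` warning in that branch.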