```diff
@@ -1 +1 @@
 #ifdef IS_MPI
-
+#include <iostream>
 #include <cstdlib>
 #include <cstring>
+#include <cmath>
 #include <mpi.h>
 #include <mpi++.h>
 
```
```diff
@@ -10 +11 @@
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
 
+#define BASE_SEED 123456789
 
 mpiSimulation* mpiSim;
 
```
```diff
@@ -23 +25 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 
   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];
 
   mpiSim = this;
```
```diff
@@ -33 +34 @@ mpiSimulation::~mpiSimulation(){
 
 mpiSimulation::~mpiSimulation(){
 
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
   // perhaps we should let fortran know the party is over.
 
```
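The destructor additions plug a leak: all three maps are allocated with `new[]` in the constructor, so each must be released with `delete[]` (plain `delete` on a `new[]` array is undefined behavior). A minimal standalone sketch of the same ownership pattern, with hypothetical sizes standing in for `entryPlug->n_mol` and `entryPlug->n_atoms`:

```cpp
// Sketch only (not OOPSE code): the new[]/delete[] pairing the patch enforces.
#include <iostream>

struct ProcMaps {
  int* MolToProcMap;
  int* AtomToProcMap;

  ProcMaps(int nMol, int nAtoms)
      : MolToProcMap(new int[nMol]),    // molecule id -> owning rank
        AtomToProcMap(new int[nAtoms])  // atom id -> owning rank
  {}

  ~ProcMaps() {
    delete[] MolToProcMap;   // must be delete[], matching new[]
    delete[] AtomToProcMap;  // plain delete here would be UB
  }
};

int main() {
  ProcMaps maps(4, 10);      // hypothetical molecule/atom counts
  maps.MolToProcMap[0] = 1;
  std::cout << "molecule 0 -> rank " << maps.MolToProcMap[0] << "\n";
}   // maps leaves scope; the destructor frees both arrays
```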
```diff
@@ -44 +49 @@ int* mpiSimulation::divideLabor( void ){
 
   int nComponents;
   MoleculeStamp** compStamps;
-  randomSPRNG myRandom;
+  randomSPRNG *myRandom;
   int* componentsNmol;
   int* AtomsPerProc;
 
```
```diff
@@ -58 +63 @@ int* mpiSimulation::divideLabor( void ){
   int molIndex, atomIndex, compIndex, compStart;
   int done;
   int nLocal, molLocal;
-  int i, index;
+  int i, j, loops, which_proc, nmol_local, natoms_local;
+  int nmol_global, natoms_global;
+  int local_index, index;
   int smallDiff, bigDiff;
+  int baseSeed = BASE_SEED;
 
   int testSum;
 
```
```diff
@@ -75 +83 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
 
-  myRandom = new randomSPRNG();
+  myRandom = new randomSPRNG( baseSeed );
 
   a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
```
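Two setup details matter here: the generator is now built from an explicit `BASE_SEED`, so the decomposition is reproducible from run to run, and `a` is the reciprocal of the average atoms per molecule, which later scales the acceptance test. A sketch of just this arithmetic, with invented counts and POSIX `drand48` standing in for `randomSPRNG::getRandom()`:

```cpp
// Sketch only: drand48 replaces the SPRNG stream, counts are invented.
#include <cstdlib>
#include <iostream>

int main() {
  const long baseSeed = 123456789;  // mirrors BASE_SEED in the patch
  srand48(baseSeed);                // seed once, as the constructor does

  const int nMolGlobal   = 250;     // hypothetical molecule count
  const int nAtomsGlobal = 1000;    // hypothetical atom count

  // a = 1 / (average atoms per molecule); 4 atoms/molecule gives a = 0.25
  double a = (double)nMolGlobal / (double)nAtomsGlobal;
  std::cout << "a = " << a << ", first draw = " << drand48() << "\n";
}
```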
```diff
@@ -120 +128 @@ int* mpiSimulation::divideLabor( void ){
 
       // Pick a processor at random
 
-      which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
 
       // How many atoms does this processor have?
 
       old_atoms = AtomsPerProc[which_proc];
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
+      new_atoms = old_atoms + add_atoms;
 
       // If the processor already had too many atoms, just skip this
       // processor and try again.
 
-      if (old_atoms >= nTarget) continue;
-
-      add_atoms = compStamps[MolComponentType[i]]->getNatoms();
-      new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
-        done = 1;
-        continue;
-      }
-
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
       // and be done with it.
```
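Hoisting `add_atoms`/`new_atoms` above the early `continue` lets the give-up branch below reuse them. The loop as a whole is rejection sampling: draw a random processor, then place the molecule, redraw, or force the placement after too many attempts. A compressed control-flow sketch with invented sizes, again using `drand48` in place of the SPRNG object:

```cpp
// Sketch of the placement loop's shape; sizes and counts are invented.
#include <cstdlib>
#include <iostream>

int main() {
  srand48(42);
  const int nProcs = 4, nTarget = 10, addAtoms = 3, maxLoops = 100;
  int atomsPerProc[nProcs] = {9, 2, 11, 5};

  int whichProc = 0, loops = 0, done = 0;
  while (!done) {
    loops++;
    whichProc = (int)(drand48() * nProcs);     // pick a rank at random
    int oldAtoms = atomsPerProc[whichProc];
    int newAtoms = oldAtoms + addAtoms;

    if (loops > maxLoops)         done = 1;    // give up: place it anyway
    else if (oldAtoms >= nTarget) continue;    // rank already full: redraw
    else if (newAtoms <= nTarget) done = 1;    // fits under target: place it
    else continue;                             // borderline: the real code
                                               // accepts with prob exp(-a*x)
  }
  atomsPerProc[whichProc] += addAtoms;
  std::cout << "placed on rank " << whichProc
            << " after " << loops << " draw(s)\n";
}
```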
```diff
@@ -164 +155 @@ int* mpiSimulation::divideLabor( void ){
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
+
+      if (old_atoms >= nTarget) continue;
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
         }
         done = 1;
         continue;
       }
 
+
       // The only situation left is where old_atoms < nTarget, but
       // new_atoms > nTarget. We want to accept this with some
       // probability that dies off the farther we are from nTarget
```
```diff
@@ -180 +188 @@ int* mpiSimulation::divideLabor( void ){
       // where a = 1 / (average atoms per molecule)
 
       x = (double) (new_atoms - nTarget);
-      y = myRandom.getRandom();
+      y = myRandom->getRandom();
 
       if (exp(- a * x) > y) {
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
         done = 1;
         continue;
       } else {
```
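The reordered `atomIndex++` is the substantive fix in these two hunks: the new code writes `AtomToProcMap` at the current slot and then advances, where the old increment-first order skipped one slot and stepped past the last atom (assuming the counter starts at 0; its initialization is outside this diff). The acceptance rule itself admits an overshoot of x atoms with probability exp(-a*x), so small overshoots usually pass and large ones rarely do. A worked standalone version of that test, with invented numbers and `drand48` for SPRNG:

```cpp
// Sketch of the acceptance rule with invented numbers: a = 0.25
// (four atoms per molecule on average), overshoot x = 3 atoms.
#include <cmath>
#include <cstdlib>
#include <iostream>

int main() {
  srand48(7);
  const double a = 0.25;               // 1 / (average atoms per molecule)
  const int newAtoms = 13, nTarget = 10;

  double x = (double)(newAtoms - nTarget);  // overshoot of 3 atoms
  double y = drand48();                     // uniform draw in [0,1)

  // exp(-0.25 * 3) ~= 0.47: accepted about half the time; an overshoot
  // of 12 atoms would pass only ~5% of the time (exp(-3) ~= 0.05).
  if (std::exp(-a * x) > y)
    std::cout << "accept the overshoot\n";
  else
    std::cout << "reject and redraw a processor\n";
}
```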
```diff
@@ -200 +208 @@ int* mpiSimulation::divideLabor( void ){
 
     // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
   } else {
 
     // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
+
+
   }
 
```
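Dropping the `&` is the real fix in this hunk: `MolToProcMap` is already an `int*`, so `&MolToProcMap` passes the address of the pointer variable itself, and the broadcast reads or scribbles over unrelated memory. A minimal sketch of the correct call shape, written against the plain C MPI API (the `MPI::` C++ bindings used above were later deprecated and removed from the MPI standard):

```cpp
// Sketch (not OOPSE code): broadcasting a heap array from rank 0.
// The buffer argument is the pointer value itself, never &pointer.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  const int nMol = 8;                 // hypothetical molecule count
  int* molToProc = new int[nMol];
  if (rank == 0)                      // rank 0 fills the map...
    for (int i = 0; i < nMol; i++) molToProc[i] = i % 2;

  // ...and every rank receives it. Passing &molToProc here would
  // broadcast the pointer's own bytes: the bug this hunk removes.
  MPI_Bcast(molToProc, nMol, MPI_INT, 0, MPI_COMM_WORLD);

  std::printf("rank %d: molToProc[0] = %d\n", rank, molToProc[0]);
  delete[] molToProc;
  MPI_Finalize();
}
```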
```diff
@@ -245 +255 @@ int* mpiSimulation::divideLabor( void ){
     }
   }
 
+  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
+
   MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
   MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
 
```
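The two `Allreduce` calls sum the per-rank molecule and atom tallies so every processor ends up holding the global totals; the natural use, suggested by the surrounding code though the comparison itself sits outside this hunk, is to check those sums against `nMolGlobal` and `nAtomsGlobal` to catch a broken partition. A sketch of that pattern in the C API:

```cpp
// Sketch of the consistency check the global sums enable; the per-rank
// tally here is invented (every rank claims 5 atoms).
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, size;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);

  int natomsLocal = 5;    // hypothetical local atom count
  int natomsGlobal = 0;
  MPI_Allreduce(&natomsLocal, &natomsGlobal, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  // With 5 atoms on each of `size` ranks the sum must be 5 * size;
  // anything else means atoms were dropped or double-counted.
  if (rank == 0 && natomsGlobal != 5 * size) {
    std::fprintf(stderr, "bad partition: %d != %d\n",
                 natomsGlobal, 5 * size);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }
  MPI_Finalize();
}
```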
```diff
@@ -277 +289 @@ int* mpiSimulation::divideLabor( void ){
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      globalIndex[local_index] =
+      globalIndex[local_index] = i;
+      local_index++;
     }
   }
 
-
-
-
-  index = mpiPlug->myAtomStart;
-  //  for( i=0; i<mpiPlug->myNlocal; i++){
-  //    globalIndex[i] = index;
-  //    index++;
-  //  }
-
-  //  return globalIndex;
+  return globalIndex;
 }
 
```
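This last hunk is what makes `divideLabor` usable by its caller: the old version left `globalIndex[local_index] =` unfinished and had only a commented-out return. The fixed loop builds a standard local-to-global index map, where local slot j holds the global id of the j-th atom assigned to this rank. A standalone sketch with an invented `AtomToProcMap`:

```cpp
// Sketch of the local -> global index map the fixed loop builds;
// the proc map and rank id are invented.
#include <iostream>

int main() {
  const int nAtomsGlobal = 8, myNode = 1;
  int atomToProcMap[nAtomsGlobal] = {0, 1, 1, 0, 2, 1, 2, 0};

  int globalIndex[nAtomsGlobal];  // at most nAtomsGlobal local atoms
  int localIndex = 0;
  for (int i = 0; i < nAtomsGlobal; i++)
    if (atomToProcMap[i] == myNode)
      globalIndex[localIndex++] = i;  // local slot -> global atom id

  // Rank 1 owns global atoms 1, 2 and 5:
  for (int j = 0; j < localIndex; j++)
    std::cout << "local " << j << " -> global " << globalIndex[j] << "\n";
}
```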