@@ -1 +1 @@
 #ifdef IS_MPI
-
+#include <iostream>
 #include <cstdlib>
 #include <cstring>
+#include <cmath>
 #include <mpi.h>
 #include <mpi++.h>
 
@@ -10 +11 @@
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
 
+#define BASE_SEED 123456789
 
 mpiSimulation* mpiSim;
 
@@ -23 +25 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 
   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];
 
   mpiSim = this;
@@ -33 +34 @@ mpiSimulation::~mpiSimulation(){
 
 mpiSimulation::~mpiSimulation(){
 
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
   // perhaps we should let fortran know the party is over.
 
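These added lines plug a leak: the three maps are allocated with `new[]` in the constructor, so the destructor must release them with `delete[]` (plain `delete` on an array is undefined behavior). A minimal sketch of the matched pattern, with hypothetical class and member names:

```cpp
#include <cstddef>

// Arrays allocated with new[] must be freed with delete[].
// ProcMaps and its members are illustrative names, not from the diff.
class ProcMaps {
public:
  ProcMaps(std::size_t nMol, std::size_t nAtoms)
    : molToProc(new int[nMol]), atomToProc(new int[nAtoms]) {}

  ~ProcMaps() {
    delete[] molToProc;   // plain delete here would be undefined behavior
    delete[] atomToProc;
  }

private:
  int* molToProc;
  int* atomToProc;
};
```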
@@ -44 +49 @@ int* mpiSimulation::divideLabor( void ){
 
   int nComponents;
   MoleculeStamp** compStamps;
-  randomSPRNG myRandom;
+  randomSPRNG *myRandom;
   int* componentsNmol;
   int* AtomsPerProc;
 
@@ -58 +63 @@ int* mpiSimulation::divideLabor( void ){
   int molIndex, atomIndex, compIndex, compStart;
   int done;
   int nLocal, molLocal;
-  int i, index;
+  int i, j, loops, which_proc, nmol_local, natoms_local;
+  int nmol_global, natoms_global;
+  int local_index, index;
   int smallDiff, bigDiff;
+  int baseSeed = BASE_SEED;
 
   int testSum;
 
@@ -75 +83 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
 
-  myRandom = new randomSPRNG();
+  myRandom = new randomSPRNG( baseSeed );
 
-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
   // Initialize things that we'll send out later:
   for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
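The new prefactor is easier to read after inverting the ratio: `nAtomsGlobal / nMolGlobal` is the average number of atoms per molecule, so `a = 3.0 * nMol / nAtoms = 3.0 / (average atoms per molecule)`, matching the updated comment further down (`a = penalty / (average atoms per molecule)`) with a penalty of 3.0. A small worked example of the resulting acceptance probabilities, using an invented 3-atoms-per-molecule system:

```cpp
#include <cmath>
#include <cstdio>

// Worked example of Pacc(x) = exp(-a * x) with
// a = penalty / (avg atoms per molecule).  The system size is hypothetical.
int main() {
  double penalty = 3.0;                   // the new 3.0 prefactor
  double nMol = 1000.0, nAtoms = 3000.0;  // 3 atoms per molecule on average
  double a = penalty * nMol / nAtoms;     // = 1.0 for this system

  // x = number of atoms a candidate assignment overshoots the target by
  for (int x = 1; x <= 5; x++)
    std::printf("x = %d  Pacc = %.4f\n", x, std::exp(-a * x));
  return 0;
}
```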
@@ -120 +128 @@ int* mpiSimulation::divideLabor( void ){
 
       // Pick a processor at random
 
-      which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
 
       // How many atoms does this processor have?
 
       old_atoms = AtomsPerProc[which_proc];
-
-      // If the processor already had too many atoms, just skip this
-      // processor and try again.
-
-      if (old_atoms >= nTarget) continue;
-
-      add_atoms = compStamps[MolComponentType[i]]->getNatoms();
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
-        done = 1;
-        continue;
-      }
 
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
@@ -164 +152 @@ int* mpiSimulation::divideLabor( void ){
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
         }
         done = 1;
         continue;
       }
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
 
-      // The only situation left is where old_atoms < nTarget, but
-      // new_atoms > nTarget.  We want to accept this with some
-      // probability that dies off the farther we are from nTarget
 
+      // The only situation left is when new_atoms > nTarget.  We
+      // want to accept this with some probability that dies off the
+      // farther we are from nTarget
+
       // roughly:  x = new_atoms - nTarget
       //           Pacc(x) = exp(- a * x)
-      // where a = 1 / (average atoms per molecule)
+      // where a = penalty / (average atoms per molecule)
 
       x = (double) (new_atoms - nTarget);
-      y = myRandom.getRandom();
-
-      if (exp(- a * x) > y) {
+      y = myRandom->getRandom();
+
+      if (y < exp(- a * x)) {
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
         done = 1;
         continue;
       } else {
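Net effect of the reordering: the loop now (1) force-assigns the molecule once the retry budget is exhausted, (2) otherwise takes the processor outright when the molecule fits under `nTarget`, and (3) otherwise accepts the overshoot with probability `exp(-a*x)`. Swapping `atomIndex++` below the store also fixes an off-by-one, assuming `atomIndex` starts at 0: incrementing first left `AtomToProcMap[0]` unwritten and stepped one slot past the last atom. A condensed, self-contained sketch of the control flow; `rand01()` stands in for `randomSPRNG::getRandom()`, and the retry cap of 100 is an assumption (the diff does not show the real bound):

```cpp
#include <cmath>
#include <cstddef>
#include <cstdlib>
#include <vector>

// Stand-in for randomSPRNG::getRandom(): uniform double in [0, 1).
static double rand01() { return std::rand() / (RAND_MAX + 1.0); }

// Condensed sketch of the molecule-to-processor assignment loop.
void divideLabor(const std::vector<int>& molAtoms,  // atom count per molecule
                 int nProcs, int nTarget, double a,
                 std::vector<int>& molToProc,
                 std::vector<int>& atomsPerProc) {
  for (std::size_t i = 0; i < molAtoms.size(); i++) {
    for (int loops = 0; ; loops++) {
      int which_proc = (int)(rand01() * nProcs);  // pick a processor at random
      int new_atoms = atomsPerProc[which_proc] + molAtoms[i];

      bool forced = loops > 100;           // give up after too many tries
      bool fits   = new_atoms <= nTarget;  // stays under the target
      double x    = (double)(new_atoms - nTarget);
      bool lucky  = !fits && rand01() < std::exp(-a * x);  // Pacc = exp(-a x)

      if (forced || fits || lucky) {
        molToProc[i] = which_proc;
        atomsPerProc[which_proc] = new_atoms;
        break;
      }
    }
  }
}
```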
@@ -200 +203 @@ int* mpiSimulation::divideLabor( void ){
 
     // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
   } else {
 
     // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
+    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
+    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                           MPI_INT, 0);
 
-    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
+    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                           MPI_INT, 0);
+
+
   }
 
 
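Dropping the `&` on each buffer is the substantive fix here. The maps are declared as `int*`, so `&MolToProcMap` has type `int**`, and the old calls broadcast the bytes of the pointer variable rather than the array it points to; the first argument to `Bcast` must be the data buffer itself. A minimal runnable sketch of the corrected call shape (the C++ bindings match this diff's era; they were removed in MPI-3, where the plain C equivalent is `MPI_Bcast(map, n, MPI_INT, 0, MPI_COMM_WORLD)`):

```cpp
#include <mpi.h>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);

  const int n = 8;
  int* map = new int[n];
  if (MPI::COMM_WORLD.Get_rank() == 0)
    for (int i = 0; i < n; i++) map[i] = i;    // root fills the array

  // Pass the array itself as the buffer; &map would broadcast the pointer.
  MPI::COMM_WORLD.Bcast(map, n, MPI::INT, 0);

  delete[] map;
  MPI::Finalize();
  return 0;
}
```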
@@ -277 +282 @@ int* mpiSimulation::divideLabor( void ){
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      local_index++;
       globalIndex[local_index] = i;
+      local_index++;
     }
   }
 
-
-
-
-  index = mpiPlug->myAtomStart;
-  //  for( i=0; i<mpiPlug->myNlocal; i++){
-  //    globalIndex[i] = index;
-  //    index++;
-  //  }
-
-  //  return globalIndex;
+  return globalIndex;
 }
 
 
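Moving `local_index++` after the store is the same off-by-one fix as in the assignment loops above: with `local_index` starting at 0, incrementing first left `globalIndex[0]` unwritten and wrote one slot past the end on the last match. The loop is the standard filter-and-compact idiom; a sketch with illustrative names:

```cpp
#include <vector>

// Collect the global indices of entries owned by this node.
// atomToProc and myNode mirror the diff; the vector return is illustrative.
std::vector<int> ownedGlobalIndices(const std::vector<int>& atomToProc,
                                    int myNode) {
  std::vector<int> globalIndex;
  for (int i = 0; i < (int)atomToProc.size(); i++)
    if (atomToProc[i] == myNode)
      globalIndex.push_back(i);   // store first; the count advances with it
  return globalIndex;
}
```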
@@ -300 +296 @@ void mpiSimulation::mpiRefresh( void ){
   int isError, i;
   int *globalIndex = new int[mpiPlug->myNlocal];
 
-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing is 1-based, so shift each 0-based C++ index up by one
+  // before handing it across the language boundary.
+
+  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+
 
   isError = 0;
   setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
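The `+1` exists because Fortran arrays are 1-based by default while the C++ side is 0-based, so every index crossing the interface has to be shifted going in and shifted back coming out. A small sketch of that convention with hypothetical helper names:

```cpp
#include <cstddef>
#include <vector>

// Hypothetical helpers for the C++/Fortran index convention used above:
// add 1 before passing an index to Fortran, subtract 1 on the way back.
inline int toFortran(int cIndex)   { return cIndex + 1; }
inline int fromFortran(int fIndex) { return fIndex - 1; }

// Shift a whole index array for a Fortran call, as mpiRefresh does.
std::vector<int> shiftForFortran(const std::vector<int>& cIndices) {
  std::vector<int> fIndices(cIndices.size());
  for (std::size_t i = 0; i < cIndices.size(); i++)
    fIndices[i] = toFortran(cIndices[i]);   // 0-based -> 1-based
  return fIndices;
}
```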