```diff
@@ -1 +1 @@
 #ifdef IS_MPI
-
-#include <cstdlib>
-#include <cstring>
-#include <cmath>
+#include <iostream>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
 #include <mpi.h>
-#include <mpi++.h>
 
 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
 
-#define BASE_SEED 123456789
-
 mpiSimulation* mpiSim;
 
 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
```
```diff
@@ -20 +17 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;
 
-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
   mpiPlug->myNode = worldRank;
 
   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];
 
   mpiSim = this;
```
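This hunk trades the MPI-2 C++ binding `MPI::COMM_WORLD.Get_size()` for the C call `MPI_Comm_size`, which reports the size through an out-parameter and returns an error code. A minimal sketch of the pattern (the wrapper name is invented for illustration):

```cpp
#include <mpi.h>

// C++ bindings returned the value:  n = MPI::COMM_WORLD.Get_size();
// The C binding fills an out-parameter and returns an error code instead.
int communicatorSize(void) {
  int n = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &n);
  return n;
}
```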
```diff
@@ -61 +57 @@ int* mpiSimulation::divideLabor( void ){
   int old_atoms, add_atoms, new_atoms;
 
   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex;
   int done;
-  int nLocal, molLocal;
   int i, j, loops, which_proc, nmol_local, natoms_local;
   int nmol_global, natoms_global;
-  int local_index, index;
-  int smallDiff, bigDiff;
-  int baseSeed = BASE_SEED;
+  int local_index;
+  int baseSeed = entryPlug->getSeed();
 
-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
```
```diff
@@ -84 +76 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
 
+
   myRandom = new randomSPRNG( baseSeed );
 
-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
   // Initialize things that we'll send out later:
   for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
```
```diff
@@ -134 +127 @@ int* mpiSimulation::divideLabor( void ){
       // How many atoms does this processor have?
 
       old_atoms = AtomsPerProc[which_proc];
-
-      // If the processor already had too many atoms, just skip this
-      // processor and try again.
-
-      if (old_atoms >= nTarget) continue;
-
       add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
-        done = 1;
-        continue;
-      }
 
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
```
```diff
@@ -173 +146 @@ int* mpiSimulation::divideLabor( void ){
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
         }
         done = 1;
         continue;
       }
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
 
-      // The only situation left is where old_atoms < nTarget, but
-      // new_atoms > nTarget. We want to accept this with some
-      // probability that dies off the farther we are from nTarget
 
+      // The only situation left is when new_atoms > nTarget. We
+      // want to accept this with some probability that dies off the
+      // farther we are from nTarget
+
       // roughly: x = new_atoms - nTarget
       // Pacc(x) = exp(- a * x)
-      // where a = 1 / (average atoms per molecule)
+      // where a = penalty / (average atoms per molecule)
 
       x = (double) (new_atoms - nTarget);
       y = myRandom->getRandom();
-
-      if (exp(- a * x) > y) {
+
+      if (y < exp(- a * x)) {
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
         done = 1;
         continue;
       } else {
```
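The reordered logic above amounts to a randomized greedy assignment with a Metropolis-style tail: a molecule always lands on a processor that stays at or under `nTarget`, and an over-target choice is accepted with probability exp(-a·x), which decays with the overshoot x. A compact sketch of the same idea under simplified assumptions; `rand()` stands in for the SPRNG generator, and `balance`, `nProcs`, `atomsPerMol`, and the other names are invented for illustration:

```cpp
#include <cstdlib>
#include <cmath>

// Sketch: assign each molecule to a random processor, accepting an
// over-target assignment with probability exp(-a * overshoot).
void balance(int nProcs, int nMol, const int* atomsPerMol, int* molToProc) {
  int totalAtoms = 0;
  for (int i = 0; i < nMol; i++) totalAtoms += atomsPerMol[i];

  int nTarget = (int)(totalAtoms / (double)nProcs);
  // a = penalty / (average atoms per molecule), with penalty = 3.0 as above
  double a = 3.0 * (double)nMol / (double)totalAtoms;

  int* atomsPerProc = new int[nProcs]();  // zero-initialized per-proc loads
  for (int i = 0; i < nMol; i++) {
    for (int loops = 0; ; loops++) {
      int proc = rand() % nProcs;
      int newAtoms = atomsPerProc[proc] + atomsPerMol[i];
      double x = (double)(newAtoms - nTarget);
      double y = rand() / (double)RAND_MAX;
      // Give up after too many rejections, accept anything under target,
      // or accept an overshoot with exponentially decaying probability.
      if (loops > 100 || newAtoms <= nTarget || y < exp(-a * x)) {
        molToProc[i] = proc;
        atomsPerProc[proc] = newAtoms;
        break;
      }
    }
  }
  delete[] atomsPerProc;
}
```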
```diff
@@ -209 +197 @@ int* mpiSimulation::divideLabor( void ){
 
     // Spray out this nonsense to all other processors:
 
-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
   } else {
 
     // Listen to your marching orders from processor 0:
 
-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);
 
-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+
   }
 
 
```
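Most of this hunk is the mechanical move from the MPI-2 C++ bindings to the C API: `MPI::COMM_WORLD.Bcast(buf, count, type, root)` becomes `MPI_Bcast(buf, count, type, root, comm)`, with the communicator demoted from object to final argument. A sketch of the pattern (the wrapper name and `buf`/`count` are invented for illustration):

```cpp
#include <mpi.h>

// Broadcast an int buffer from rank 0 to every rank in the communicator.
// C++ binding:  MPI::COMM_WORLD.Bcast(buf, count, MPI_INT, 0);
// C binding:    the communicator becomes the last parameter.
void broadcastFromRoot(int* buf, int count) {
  MPI_Bcast(buf, count, MPI_INT, 0, MPI_COMM_WORLD);
}
```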
```diff
@@ -254 +244 @@ int* mpiSimulation::divideLabor( void ){
     }
   }
 
-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);
 
   if( nmol_global != entryPlug->n_mol ){
     sprintf( painCave.errMsg,
```
```diff
@@ -286 +278 @@ int* mpiSimulation::divideLabor( void ){
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      local_index++;
       globalIndex[local_index] = i;
+      local_index++;
     }
   }
-
+
   return globalIndex;
 }
 
```
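This hunk fixes an off-by-one in filling `globalIndex`: the old code incremented `local_index` before the first store, so slot 0 was never written and the final store landed one element past the end of the array. A sketch of the corrected store-then-advance pattern (function and variable names invented for illustration):

```cpp
// Collect the global indices owned by this node into a local array.
// Store first, then advance; k ends up equal to the local count.
int gatherLocalIndices(const int* owner, int nGlobal, int myNode, int* local) {
  int k = 0;
  for (int i = 0; i < nGlobal; i++) {
    if (owner[i] == myNode) {
      local[k] = i;  // write into the next free slot
      k++;           // then advance the cursor
    }
  }
  return k;
}
```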
```diff
@@ -300 +292 @@ void mpiSimulation::mpiRefresh( void ){
   int isError, i;
   int *globalIndex = new int[mpiPlug->myNlocal];
 
-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
+  // not barf
 
+  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+
 
   isError = 0;
   setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
```
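The last hunk shifts every global index up by one before it crosses into Fortran, since C counts array elements from 0 and Fortran (by default) from 1. The conversion in isolation (names invented for illustration):

```cpp
// C indices run 0..n-1; default Fortran indices run 1..n.
// Add 1 to each index before handing the array to Fortran code.
void toFortranIndices(const int* cIndex, int* fIndex, int n) {
  for (int i = 0; i < n; i++)
    fIndex[i] = cIndex[i] + 1;
}
```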