# | Line 1 | Line 1 | |
---|---|---|---
1 | #ifdef IS_MPI | |
2 | < | |
2 | > | #include <iostream> |
3 | #include <cstdlib> | |
4 | #include <cstring> | |
5 | #include <cmath> | |
6 | #include <mpi.h> | |
7 | – | #include <mpi++.h> |
7 | ||
8 | #include "mpiSimulation.hpp" | |
9 | #include "simError.h" | |
# | Line 20 | Line 19 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) | |
19 | entryPlug = the_entryPlug; | |
20 | mpiPlug = new mpiSimData; | |
21 | ||
22 | < | mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size(); |
22 | > | MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) ); |
23 | mpiPlug->myNode = worldRank; | |
24 | ||
25 | MolToProcMap = new int[entryPlug->n_mol]; | |
26 | MolComponentType = new int[entryPlug->n_mol]; | |
28 | – | |
27 | AtomToProcMap = new int[entryPlug->n_atoms]; | |
28 | ||
29 | mpiSim = this; | |
# | Line 86 | Line 84 | int* mpiSimulation::divideLabor( void ){ | |
84 | ||
85 | myRandom = new randomSPRNG( baseSeed ); | |
86 | ||
87 | < | a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal; |
87 | > | a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal; |
88 | ||
89 | // Initialize things that we'll send out later: | |
90 | for (i = 0; i < mpiPlug->numberProcessors; i++ ) { | |
# | Line 134 | Line 132 | int* mpiSimulation::divideLabor( void ){ | |
132 | // How many atoms does this processor have? | |
133 | ||
134 | old_atoms = AtomsPerProc[which_proc]; | |
137 | – | |
138 | – | // If the processor already had too many atoms, just skip this |
139 | – | // processor and try again. |
140 | – | |
141 | – | if (old_atoms >= nTarget) continue; |
142 | – | |
135 | add_atoms = compStamps[MolComponentType[i]]->getNAtoms(); | |
136 | new_atoms = old_atoms + add_atoms; | |
145 | – | |
146 | – | // If we can add this molecule to this processor without sending |
147 | – | // it above nTarget, then go ahead and do it: |
148 | – | |
149 | – | if (new_atoms <= nTarget) { |
150 | – | MolToProcMap[i] = which_proc; |
151 | – | AtomsPerProc[which_proc] += add_atoms; |
152 | – | for (j = 0 ; j < add_atoms; j++ ) { |
153 | – | atomIndex++; |
154 | – | AtomToProcMap[atomIndex] = which_proc; |
155 | – | } |
156 | – | done = 1; |
157 | – | continue; |
158 | – | } |
137 | ||
138 | // If we've been through this loop too many times, we need | |
139 | // to just give up and assign the molecule to this processor | |
# | Line 173 | Line 151 | int* mpiSimulation::divideLabor( void ){ | |
151 | MolToProcMap[i] = which_proc; | |
152 | AtomsPerProc[which_proc] += add_atoms; | |
153 | for (j = 0 ; j < add_atoms; j++ ) { | |
154 | < | atomIndex++; |
155 | < | AtomToProcMap[atomIndex] = which_proc; |
154 | > | AtomToProcMap[atomIndex] = which_proc; |
155 | > | atomIndex++; |
156 | } | |
157 | done = 1; | |
158 | continue; | |
159 | } | |
160 | + | |
161 | + | // If we can add this molecule to this processor without sending |
162 | + | // it above nTarget, then go ahead and do it: |
163 | + | |
164 | + | if (new_atoms <= nTarget) { |
165 | + | MolToProcMap[i] = which_proc; |
166 | + | AtomsPerProc[which_proc] += add_atoms; |
167 | + | for (j = 0 ; j < add_atoms; j++ ) { |
168 | + | AtomToProcMap[atomIndex] = which_proc; |
169 | + | atomIndex++; |
170 | + | } |
171 | + | done = 1; |
172 | + | continue; |
173 | + | } |
174 | ||
183 | – | // The only situation left is where old_atoms < nTarget, but |
184 | – | // new_atoms > nTarget. We want to accept this with some |
185 | – | // probability that dies off the farther we are from nTarget |
175 | ||
176 | + | // The only situation left is when new_atoms > nTarget. We |
177 | + | // want to accept this with some probability that dies off the |
178 | + | // farther we are from nTarget |
179 | + | |
180 | // roughly: x = new_atoms - nTarget | |
181 | // Pacc(x) = exp(- a * x) | |
182 | < | // where a = 1 / (average atoms per molecule) |
182 | > | // where a = penalty / (average atoms per molecule) |
183 | ||
184 | x = (double) (new_atoms - nTarget); | |
185 | y = myRandom->getRandom(); | |
186 | < | |
187 | < | if (exp(- a * x) > y) { |
186 | > | |
187 | > | if (y < exp(- a * x)) { |
188 | MolToProcMap[i] = which_proc; | |
189 | AtomsPerProc[which_proc] += add_atoms; | |
190 | for (j = 0 ; j < add_atoms; j++ ) { | |
191 | < | atomIndex++; |
192 | < | AtomToProcMap[atomIndex] = which_proc; |
193 | < | } |
191 | > | AtomToProcMap[atomIndex] = which_proc; |
192 | > | atomIndex++; |
193 | > | } |
194 | done = 1; | |
195 | continue; | |
196 | } else { | |
# | Line 209 | Line 202 | int* mpiSimulation::divideLabor( void ){ | |
202 | ||
203 | // Spray out this nonsense to all other processors: | |
204 | ||
205 | < | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
206 | < | MPI_INT, 0); |
205 | > | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
206 | > | MPI_INT, 0, MPI_COMM_WORLD); |
207 | ||
208 | < | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
209 | < | MPI_INT, 0); |
208 | > | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
209 | > | MPI_INT, 0, MPI_COMM_WORLD); |
210 | ||
211 | < | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
212 | < | MPI_INT, 0); |
211 | > | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal, |
212 | > | MPI_INT, 0, MPI_COMM_WORLD); |
213 | ||
214 | < | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
215 | < | MPI_INT, 0); |
214 | > | MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
215 | > | MPI_INT, 0, MPI_COMM_WORLD); |
216 | } else { | |
217 | ||
218 | // Listen to your marching orders from processor 0: | |
219 | ||
220 | < | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
221 | < | MPI_INT, 0); |
220 | > | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
221 | > | MPI_INT, 0, MPI_COMM_WORLD); |
222 | ||
223 | < | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
224 | < | MPI_INT, 0); |
223 | > | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
224 | > | MPI_INT, 0, MPI_COMM_WORLD); |
225 | ||
226 | < | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
227 | < | MPI_INT, 0); |
226 | > | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal, |
227 | > | MPI_INT, 0, MPI_COMM_WORLD); |
228 | ||
229 | < | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
230 | < | MPI_INT, 0); |
229 | > | MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
230 | > | MPI_INT, 0, MPI_COMM_WORLD); |
231 | > | |
232 | > | |
233 | } | |
234 | ||
235 | ||
# | Line 254 | Line 249 | int* mpiSimulation::divideLabor( void ){ | |
249 | } | |
250 | } | |
251 | ||
252 | < | MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM); |
253 | < | MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM); |
252 | > | MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM, |
253 | > | MPI_COMM_WORLD); |
254 | > | MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT, |
255 | > | MPI_SUM, MPI_COMM_WORLD); |
256 | ||
257 | if( nmol_global != entryPlug->n_mol ){ | |
258 | sprintf( painCave.errMsg, | |
# | Line 286 | Line 283 | int* mpiSimulation::divideLabor( void ){ | |
283 | local_index = 0; | |
284 | for (i = 0; i < mpiPlug->nAtomsGlobal; i++) { | |
285 | if (AtomToProcMap[i] == mpiPlug->myNode) { | |
289 | – | local_index++; |
286 | globalIndex[local_index] = i; | |
287 | + | local_index++; |
288 | } | |
289 | } | |
290 | < | |
290 | > | |
291 | return globalIndex; | |
292 | } | |
293 | ||
# | Line 300 | Line 297 | void mpiSimulation::mpiRefresh( void ){ | |
297 | int isError, i; | |
298 | int *globalIndex = new int[mpiPlug->myNlocal]; | |
299 | ||
300 | < | for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex(); |
300 | > | // Fortran indexing needs to be increased by 1 in order to get the 2 languages to |
301 | > | // not barf |
302 | ||
303 | + | for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1; |
304 | + | |
305 | ||
306 | isError = 0; | |
307 | setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError ); |
– | Removed lines |
+ | Added lines |
< | Changed lines |
> | Changed lines |