 #ifdef IS_MPI
+#include <iostream>
 #include <cstdlib>
 #include <cstring>
 #include <cmath>
 #include <mpi.h>
-#include <mpi++.h>

 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"

-#define BASE_SEED 123456789
-
 mpiSimulation* mpiSim;

 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
[...]
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;

-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
   mpiPlug->myNode = worldRank;

   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];

   mpiSim = this;
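
Note: this hunk (and the rest of the diff) moves from the MPI-1 C++ bindings in <mpi++.h> (MPI::COMM_WORLD.Get_size() and friends) to the plain C API in <mpi.h>, which returns error codes and fills its results through output pointers. A minimal sketch of the C-style calls, assuming the usual MPI_Init/MPI_Finalize bracketing (done elsewhere in this codebase, where worldRank is set):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int numberProcessors, myNode;
      MPI_Comm_size(MPI_COMM_WORLD, &numberProcessors); // size via out-pointer
      MPI_Comm_rank(MPI_COMM_WORLD, &myNode);           // rank via out-pointer
      printf("rank %d of %d\n", myNode, numberProcessors);
      MPI_Finalize();
      return 0;
    }
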
[...]
   int nmol_global, natoms_global;
   int local_index, index;
   int smallDiff, bigDiff;
-  int baseSeed = BASE_SEED;
+  int baseSeed = entryPlug->getSeed();

   int testSum;
[...]
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;

+
   myRandom = new randomSPRNG( baseSeed );

-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

   // Initialize things that we'll send out later:
   for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
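
Note: nMolGlobal / nAtomsGlobal is the reciprocal of the average number of atoms per molecule, so the new 3.0 factor acts as the "penalty" named in the updated comment further down: the acceptance probability Pacc(x) = exp(-a*x) now falls off three times faster when a molecule would push a processor past nTarget. A worked sketch under assumed numbers (300 molecules, 900 atoms, i.e. 3 atoms per molecule on average):

    #include <cmath>
    #include <iostream>

    int main() {
      double a_old = 300.0 / 900.0;        // old coefficient, ~0.333
      double a_new = 3.0 * 300.0 / 900.0;  // new coefficient, 1.0
      double x = 3.0;                      // one average molecule past nTarget
      std::cout << std::exp(-a_old * x) << "\n"; // ~0.37: often accepted
      std::cout << std::exp(-a_new * x) << "\n"; // ~0.05: rarely accepted
      return 0;
    }
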
[...]
     // How many atoms does this processor have?

     old_atoms = AtomsPerProc[which_proc];
-
-    // If the processor already had too many atoms, just skip this
-    // processor and try again.
-
-    if (old_atoms >= nTarget) continue;
-
     add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
     new_atoms = old_atoms + add_atoms;
-
-    // If we can add this molecule to this processor without sending
-    // it above nTarget, then go ahead and do it:
-
-    if (new_atoms <= nTarget) {
-      MolToProcMap[i] = which_proc;
-      AtomsPerProc[which_proc] += add_atoms;
-      for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
-      }
-      done = 1;
-      continue;
-    }

     // If we've been through this loop too many times, we need
     // to just give up and assign the molecule to this processor
[...]
       MolToProcMap[i] = which_proc;
       AtomsPerProc[which_proc] += add_atoms;
       for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
       }
       done = 1;
       continue;
     }
+
+    // If we can add this molecule to this processor without sending
+    // it above nTarget, then go ahead and do it:
+
+    if (new_atoms <= nTarget) {
+      MolToProcMap[i] = which_proc;
+      AtomsPerProc[which_proc] += add_atoms;
+      for (j = 0 ; j < add_atoms; j++ ) {
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
+      }
+      done = 1;
+      continue;
+    }

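Note: besides moving the fits-under-nTarget branch below the give-up branch, this hunk changes the fill of AtomToProcMap to store first and increment atomIndex afterwards. Assuming atomIndex starts at 0 (its initialization is in the elided region; the visible local_index fix near the end of this diff follows the same pattern), the old pre-store increment skipped slot 0 and wrote one element past the last atom. A minimal sketch with a hypothetical 3-atom map:

    #include <iostream>

    int main() {
      const int nAtoms = 3;
      int oldMap[nAtoms + 1] = { -1, -1, -1, -1 }; // spare slot exposes the overrun
      int atomIndex = 0;
      for (int j = 0; j < nAtoms; j++) {
        atomIndex++;             // old order: bump first...
        oldMap[atomIndex] = 7;   // ...then store: writes slots 1..3, misses 0
      }
      int newMap[nAtoms] = { -1, -1, -1 };
      atomIndex = 0;
      for (int j = 0; j < nAtoms; j++) {
        newMap[atomIndex] = 7;   // new order: store into the next free slot...
        atomIndex++;             // ...then bump: writes slots 0..2
      }
      std::cout << oldMap[0] << " " << newMap[0] << "\n"; // prints: -1 7
      return 0;
    }
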
-    // The only situation left is where old_atoms < nTarget, but
-    // new_atoms > nTarget. We want to accept this with some
-    // probability that dies off the farther we are from nTarget

+    // The only situation left is when new_atoms > nTarget. We
+    // want to accept this with some probability that dies off the
+    // farther we are from nTarget
+
     // roughly: x = new_atoms - nTarget
     // Pacc(x) = exp(- a * x)
-    // where a = 1 / (average atoms per molecule)
+    // where a = penalty / (average atoms per molecule)

     x = (double) (new_atoms - nTarget);
     y = myRandom->getRandom();

-    if (exp(- a * x) > y) {
+    if (y < exp(- a * x)) {
       MolToProcMap[i] = which_proc;
       AtomsPerProc[which_proc] += add_atoms;
       for (j = 0 ; j < add_atoms; j++ ) {
-        atomIndex++;
-        AtomToProcMap[atomIndex] = which_proc;
+        AtomToProcMap[atomIndex] = which_proc;
+        atomIndex++;
       }
       done = 1;
       continue;
     } else {
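
Note: rewriting exp(-a*x) > y as y < exp(-a*x) accepts exactly the same cases; the new form just reads as the conventional Monte-Carlo-style test: draw y uniform on [0,1) and accept when it lands below Pacc(x). A sketch of the test in isolation, with a stand-in uniform generator in place of randomSPRNG:

    #include <cmath>
    #include <cstdlib>
    #include <iostream>

    // Hypothetical stand-in for myRandom->getRandom(): uniform on [0,1).
    double getRandom() { return std::rand() / (RAND_MAX + 1.0); }

    int main() {
      double a = 1.0;             // penalty / (average atoms per molecule)
      int new_atoms = 13, nTarget = 10;
      double x = (double)(new_atoms - nTarget);
      double y = getRandom();
      if (y < std::exp(-a * x))   // Pacc(3) = exp(-3), about 0.05
        std::cout << "accept the overloaded processor\n";
      else
        std::cout << "reject and retry\n";
      return 0;
    }
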
[...]

     // Spray out this nonsense to all other processors:

-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
   } else {

     // Listen to your marching orders from processor 0:

-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
+
+
   }

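Note: in the C API there is no separate send and receive call for a broadcast; every rank, root and listeners alike, issues the identical MPI_Bcast, and the root argument (0 here) determines who supplies the data. That is why the "spray out" and "listen" branches now contain the same four calls and differ only in intent. A self-contained sketch:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      int map[4] = { 0, 0, 0, 0 };
      if (rank == 0) { map[0] = 1; map[1] = 1; map[2] = 2; map[3] = 2; }
      // Same call on every rank; rank 0 sends, everyone else receives.
      MPI_Bcast(map, 4, MPI_INT, 0, MPI_COMM_WORLD);
      printf("rank %d sees map[3] = %d\n", rank, map[3]);
      MPI_Finalize();
      return 0;
    }
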
[...]
     }
   }

-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);

   if( nmol_global != entryPlug->n_mol ){
     sprintf( painCave.errMsg,
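
Note: MPI_Allreduce with MPI_SUM adds each rank's local molecule and atom counts and delivers the totals to every rank, which feeds the sanity check that the distributed counts still sum to entryPlug->n_mol. A minimal sketch:

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);
      int rank, nmol_local, nmol_global;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      nmol_local = rank + 1; // pretend each rank owns a different molecule count
      // Every rank contributes its local count and receives the global sum.
      MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      printf("rank %d: global molecule count = %d\n", rank, nmol_global);
      MPI_Finalize();
      return 0;
    }
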
[...]
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      local_index++;
       globalIndex[local_index] = i;
+      local_index++;
     }
   }

   return globalIndex;
 }

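Note: this is the same store-then-increment fix as in the assignment loop, and here the off-by-one is unambiguous: local_index starts at 0, so the old pre-store increment left globalIndex[0] unset and wrote one element past globalIndex[myNlocal - 1]. After the fix, local_index always names the next free slot and finishes the loop equal to the number of atoms owned by this node.
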
[...]
   int isError, i;
   int *globalIndex = new int[mpiPlug->myNlocal];

-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the
+  // 2 languages to not barf

+  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+

   isError = 0;
   setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
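
Note: the +1 converts the zero-based C++ global index to Fortran's default one-based convention before globalIndex crosses the language boundary in setFsimParallel. A minimal sketch of the shift, with hypothetical arrays:

    #include <iostream>

    int main() {
      int cIndex[3] = { 0, 1, 2 }; // C++ side: zero-based global indices
      int fIndex[3];
      for (int i = 0; i < 3; i++)
        fIndex[i] = cIndex[i] + 1; // Fortran side expects 1..n
      std::cout << fIndex[0] << " " << fIndex[2] << "\n"; // prints: 1 3
      return 0;
    }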