#ifdef IS_MPI

#include <cstdlib>
#include <cstdio>
#include <cstring>
#include <cmath>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"


mpiSimulation* mpiSim;

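// The constructor caches the simulation info, asks MPI for the size of
// the world communicator, records this node's rank (worldRank is set
// elsewhere during startup), allocates the molecule and atom
// decomposition maps, and registers this object with the fortran layer.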
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];

  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  // free the decomposition maps that the constructor allocated:
  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}
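

// divideLabor maps molecules (and their atoms) onto processors and
// returns this node's local -> global atom index array. Node 0 picks a
// processor at random for each molecule, accepting the pick outright
// when it keeps that processor at or below the target atom count, and
// otherwise accepting with a probability that decays exponentially in
// the overshoot. The finished maps are then broadcast to all nodes.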
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG* myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG();

  // a is 1 / (average atoms per molecule); it sets the decay rate of
  // the acceptance probability used below.
  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
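
    // nTarget is the rounded-off average number of atoms per processor;
    // it is the per-processor goal for the assignment loop below.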

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];

        // If the processor already has too many atoms, just skip this
        // processor and try again.

        if (old_atoms >= nTarget) continue;

        add_atoms = compStamps[MolComponentType[i]]->getNatoms();
        new_atoms = old_atoms + add_atoms;

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            // store first, then advance; incrementing before the store
            // would skip AtomToProcMap[0] and run one slot past the end:
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is where old_atoms < nTarget, but
        // new_atoms > nTarget. We want to accept this with some
        // probability that dies off the farther we are from nTarget.

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = 1 / (average atoms per molecule)
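        //
        // For example, with an average of three atoms per molecule
        // (a ~ 1/3), overshooting nTarget by one atom is accepted with
        // probability exp(-1/3) ~ 0.72, while overshooting by ten
        // atoms is accepted with probability exp(-10/3) ~ 0.036.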

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (exp(- a * x) > y) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // otherwise, reject this pick and loop back for another try
      }
    }

  }

  // Spray out this nonsense to all other processors; everyone else just
  // listens for their marching orders from processor 0. Bcast is a
  // collective call, so the root and the listeners make identical calls
  // (and the maps are already pointers, so they are passed directly):

  MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                        MPI_INT, 0);
  MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                        MPI_INT, 0);
  MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                        MPI_INT, 0);
  MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                        MPI_INT, 0);

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI::COMM_WORLD.Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM);
  MPI::COMM_WORLD.Allreduce(&natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;
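
  // Build this node's local -> global translation table:
  // globalIndex[i] is the global index of local atom i.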
  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      // store first, then advance; incrementing before the store would
      // skip globalIndex[0] and overrun the end of the array:
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  delete myRandom;

  return globalIndex;
}
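

// mpiRefresh rebuilds this node's list of global atom indices from the
// current atom array and hands it back down to the fortran side through
// setFsimParallel.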
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  for(i=0; i<mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();

  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi