#ifdef IS_MPI
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

#define BASE_SEED 123456789

mpiSimulation* mpiSim;

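// The constructor caches the SimInfo handle, asks MPI for the size of
// the world communicator, and allocates the molecule -> processor,
// molecule -> component, and atom -> processor maps that divideLabor()
// fills in.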
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors));
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel(this);
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

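// divideLabor() is run collectively: node 0 assigns every molecule to a
// processor using randomized placement with an exponential penalty for
// overloading, broadcasts the resulting maps to all nodes, and every
// node then returns the list of global atom indices it owns.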
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG* myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index, index;
  int smallDiff, bigDiff;
  int baseSeed = BASE_SEED;

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

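  // Penalty parameter for the acceptance test in the placement loop:
  // a = penalty / (average atoms per molecule), with penalty = 3.0.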
  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

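  // Node 0 does all of the assignment; the target load, nTarget, is the
  // average number of atoms per processor, rounded to the nearest integer.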
  if (mpiPlug->myNode == 0) {
    numerator   = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast     = numerator / denominator;
    nTarget     = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i = 0; i < nComponents; i++) {
      for (j = 0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random
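        // (getRandom() is assumed to return a uniform deviate in
        // [0,1), so which_proc falls in [0, numberProcessors - 1].)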

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget.  We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget.

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(-a * x)
        // where a = penalty / (average atoms per molecule)
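        // As a worked example (numbers not from the original source):
        // with an average of 4 atoms per molecule, a = 3.0/4 = 0.75,
        // so overshooting nTarget by 3 atoms is accepted with
        // probability exp(-2.25), roughly 10% of the time.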

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(-a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

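  // Collect the global indices of the atoms that live on this node: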
  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}
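
// A sketch of the expected call pattern (not from this file): the
// caller owns the returned array and should eventually free it, e.g.
//
//   int* myGlobalIndices = mpiSim->divideLabor();
//   /* ... set up the local atoms ... */
//   delete[] myGlobalIndices;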
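
// mpiRefresh() rebuilds the (fortran-style, 1-based) global index list
// for the atoms on this node and hands it back to the fortran side.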
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int* globalIndex = new int[mpiPlug->myNlocal];

  // Fortran uses 1-based indexing, so each global index is shifted up
  // by 1 to keep the two languages in agreement.

  for (i = 0; i < mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex() + 1;

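  // Hand the parallel decomposition and the shifted indices down to
  // the fortran side: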
  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI