#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors));
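  // worldRank is assumed to be a global set elsewhere during program
  // startup; we cache it here as this simulation's node ID.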
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

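// divideLabor() assigns molecules (and their atoms) to processors.
// Node 0 builds the assignment maps with a randomized scheme: each
// molecule is offered to a random processor, accepted outright if that
// keeps the processor at or below its target atom count, and otherwise
// accepted with a probability that decays exponentially in the overage.
// The finished maps are then broadcast to all other nodes.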
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG* myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;
  int baseSeed = entryPlug->getSeed();

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->nProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;
  mpiPlug->nGroupsGlobal = entryPlug->ngroup;

  myRandom = new randomSPRNG( baseSeed );

  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
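  // 'a' is the decay constant used in the acceptance test below:
  // a = penalty / (average atoms per molecule), with the penalty set to 3.0.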

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i = 0; i < nComponents; i++) {
      for (j = 0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc );
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
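        // For example, if the system averages 3 atoms per molecule,
        // a = 3.0 * nMol / nAtoms = 1.0, so overshooting the target by
        // one atom is accepted with probability exp(-1), roughly 0.37.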

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }
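
  // At this point every node holds identical copies of all four maps.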

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->nMolLocal = nmol_local;
  mpiPlug->nAtomsLocal = natoms_local;
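
  // Build the local <-> global index translation tables. For each atom
  // (and molecule) owned by this node, globalAtomIndex maps a local index
  // to its global one; globalToLocalAtom maps the other way, holding -1
  // for anything that lives on another processor.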
  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalMolIndex.resize(mpiPlug->nMolLocal);
  globalToLocalMol.resize(mpiPlug->nMolGlobal);

  local_index = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}
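
// mpiRefresh() hands the current parallel decomposition to the fortran
// side of the code.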

void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int* globalIndex = new int[mpiPlug->nAtomsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the
  // two languages to not barf at each other.

  for (i = 0; i < mpiPlug->nAtomsLocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex() + 1;

  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}

#endif // is_mpi