mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
-  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
-  MolComponentType = new int[entryPlug->n_mol];
-  AtomToProcMap = new int[entryPlug->n_atoms];
-  GroupToProcMap = new int[entryPlug->ngroup];

-  mpiSim = this;
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
-  delete[] MolComponentType;
-  delete[] AtomToProcMap;
-  delete[] GroupToProcMap;

  delete parallelData;

  // perhaps we should let fortran know the party is over.
  // ...
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
-  parallelData->nBondsGlobal = entryPlug->n_bonds;
-  parallelData->nBendsGlobal = entryPlug->n_bends;
-  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
-  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  // ...
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
-    MolComponentType[i] = -1;
  }
-  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
-    // default to an error condition:
-    AtomToProcMap[i] = -1;
-  }
-  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
-    // default to an error condition:
-    GroupToProcMap[i] = -1;
-  }

  if (parallelData->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    // ...
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
-        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }
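    // (The hunk above elides how the per-processor target is formed.  From
    //  numerator / denominator and the "new_atoms <= nTarget" test further
    //  down, nTarget is presumably just the average atom count per
    //  processor, roughly:
    //
    //      nTarget = (int)(numerator / denominator + 0.5);   // assumed
    //
    //  so that each processor is aimed at about n_atoms / nProcessors
    //  atoms.  The exact rounding used in the original is not shown here.)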

    atomIndex = 0;
-    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      // ...
      old_atoms = AtomsPerProc[which_proc];
      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
      new_atoms = old_atoms + add_atoms;
-      old_groups = GroupsPerProc[which_proc];
-      ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
-      nAtomsInGroups = 0;
-      for (j = 0; j < ncutoff_groups; j++) {
-        cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
-        nAtomsInGroups += cg->getNMembers();
-      }
-      add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
-      new_groups = old_groups + add_groups;

      // If we've been through this loop too many times, we need
      // to just give up and assign the molecule to this processor
      // and be done with it.
      // ...
        MolToProcMap[i] = which_proc;
        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          AtomToProcMap[atomIndex] = which_proc;
-          atomIndex++;
-        }
-        GroupsPerProc[which_proc] += add_groups;
-        for (j=0; j < add_groups; j++) {
-          GroupToProcMap[groupIndex] = which_proc;
-          groupIndex++;
-        }
        done = 1;
        continue;
      }
      // ...
      if (new_atoms <= nTarget) {
        MolToProcMap[i] = which_proc;
        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          AtomToProcMap[atomIndex] = which_proc;
-          atomIndex++;
-        }
-        GroupsPerProc[which_proc] += add_groups;
-        for (j=0; j < add_groups; j++) {
-          GroupToProcMap[groupIndex] = which_proc;
-          groupIndex++;
-        }
        done = 1;
        continue;
      }
      // ...
      if (y < exp(- a * x)) {
        MolToProcMap[i] = which_proc;
        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          AtomToProcMap[atomIndex] = which_proc;
-          atomIndex++;
-        }
-        GroupsPerProc[which_proc] += add_groups;
-        for (j=0; j < add_groups; j++) {
-          GroupToProcMap[groupIndex] = which_proc;
-          groupIndex++;
-        }
        done = 1;
        continue;
      } else {
    // ...

    // Spray out this nonsense to all other processors:

-    //std::cerr << "node 0 mol2proc = \n";
-    //for (i = 0; i < parallelData->nMolGlobal; i++)
-    //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";
-
    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
-              MPI_INT, 0, MPI_COMM_WORLD);

  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
-              MPI_INT, 0, MPI_COMM_WORLD);
-
-    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
-              MPI_INT, 0, MPI_COMM_WORLD);

  }
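Both branches above end in the same MPI_Bcast call because a broadcast is a collective operation: every rank in the communicator, root and non-root alike, must issue the matching call, and after it returns every rank's buffer holds the root's data. A minimal, self-contained sketch of that pattern (the function name and arguments here are illustrative, not part of the OOPSE code):

#include <mpi.h>

// Rank 0 fills molToProcMap; every other rank receives a copy of it.
void shareMolMap(int* molToProcMap, int nMolGlobal) {
  // Collective: all ranks call this with root = 0.  On rank 0 the buffer
  // is the source; on the other ranks it is the destination.
  MPI_Bcast(molToProcMap, nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);
}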

  // Let's all check for sanity:
  // ...
    }
  }

-  natoms_local = 0;
-  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
-    if (AtomToProcMap[i] == parallelData->myNode) {
-      natoms_local++;
-    }
-  }

-  ngroups_local = 0;
-  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
-    if (GroupToProcMap[i] == parallelData->myNode) {
-      ngroups_local++;
-    }
-  }
-
  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
                MPI_COMM_WORLD);
-
-  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
-                MPI_SUM, MPI_COMM_WORLD);
-
-  MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT,
-                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
    // ...
    simError();
  }

-  if( natoms_global != entryPlug->n_atoms ){
-    sprintf( painCave.errMsg,
-             "The sum of all natoms_local, %d, did not equal the "
-             "total number of atoms, %d.\n",
-             natoms_global, entryPlug->n_atoms );
-    painCave.isFatal = 1;
-    simError();
-  }
-
-  if( ngroups_global != entryPlug->ngroup ){
-    sprintf( painCave.errMsg,
-             "The sum of all ngroups_local, %d, did not equal the "
-             "total number of cutoffGroups, %d.\n",
-             ngroups_global, entryPlug->ngroup );
-    painCave.isFatal = 1;
-    simError();
-  }
-
  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

-  globalAtomIndex.resize(parallelData->nAtomsLocal);
-  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
-  local_index = 0;
-  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
-    if (AtomToProcMap[i] == parallelData->myNode) {
-      globalAtomIndex[local_index] = i;
-      globalToLocalAtom[i] = local_index;
-      local_index++;
-    }
-    else
-      globalToLocalAtom[i] = -1;
-  }
-
-  globalGroupIndex.resize(parallelData->nGroupsLocal);
-  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
-  local_index = 0;
-  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
-    if (GroupToProcMap[i] == parallelData->myNode) {
-      globalGroupIndex[local_index] = i;
-      globalToLocalGroup[i] = local_index;
-      local_index++;
-    }
-    else
-      globalToLocalGroup[i] = -1;
-  }
-
  globalMolIndex.resize(parallelData->nMolLocal);
-  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
-      globalToLocalMol[i] = local_index;
      local_index++;
    }
-    else
-      globalToLocalMol[i] = -1;
  }

}
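The hunks above show only fragments of the assignment loop, but the shape of the heuristic is visible: processor 0 picks a candidate processor for each molecule, accepts it outright if that processor stays at or below its atom target (new_atoms <= nTarget), otherwise accepts it only with a probability that falls off exponentially in the overshoot (y < exp(-a * x)), and after too many attempts gives up and assigns the molecule anyway. How which_proc, a, x, and y are chosen is elided from the diff; a plausible reading is that which_proc is drawn at random, x is the overshoot past the target, and y is a uniform deviate. A self-contained sketch under those assumptions (every name below is hypothetical except the quantities echoed from the code above):

#include <cmath>
#include <cstdlib>
#include <vector>

// Sketch of the per-molecule acceptance rule described above.  nTarget is
// the per-processor atom target; a is a damping constant (value assumed).
int pickProcessor(const std::vector<int>& atomsPerProc, int addAtoms,
                  int nTarget, double a = 3.0) {
  const int nProcs   = static_cast<int>(atomsPerProc.size());
  const int maxTries = 100;                        // give-up threshold (assumed)
  int whichProc = 0;
  for (int attempt = 0; attempt < maxTries; ++attempt) {
    whichProc = std::rand() % nProcs;              // candidate processor
    int newAtoms = atomsPerProc[whichProc] + addAtoms;
    if (newAtoms <= nTarget) return whichProc;     // under target: take it
    double x = static_cast<double>(newAtoms - nTarget);  // overshoot
    double y = std::rand() / double(RAND_MAX);     // uniform deviate in [0,1]
    if (y < std::exp(-a * x)) return whichProc;    // rarely accept an overload
  }
  return whichProc;  // too many tries: give up and keep the last candidate
}

The caller would then record MolToProcMap[i] = whichProc and bump AtomsPerProc[whichProc], exactly as the accepted branches in the diff do.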
  // ...
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }
  // ...

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}