# | Line 15 | Line 15 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) | |
15 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) | |
16 | { | |
17 | entryPlug = the_entryPlug; | |
18 | < | mpiPlug = new mpiSimData; |
18 | > | parallelData = new mpiSimData; |
19 | ||
20 | < | MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) ); |
21 | < | mpiPlug->myNode = worldRank; |
20 | > | MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) ); |
21 | > | parallelData->myNode = worldRank; |
22 | ||
23 | MolToProcMap = new int[entryPlug->n_mol]; | |
24 | MolComponentType = new int[entryPlug->n_mol]; | |
25 | AtomToProcMap = new int[entryPlug->n_atoms]; | |
26 | + | GroupToProcMap = new int[entryPlug->ngroup]; |
27 | ||
28 | mpiSim = this; | |
29 | wrapMeSimParallel( this ); | |
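
A note for orientation: the diff never shows the `mpiSimData` declaration, but the fields it reads and writes through `parallelData` imply a shape roughly like the sketch below. This is a reconstruction from usage in this diff, not the actual header.

```cpp
// Hypothetical sketch of mpiSimData, inferred from the fields this
// diff touches; the real declaration lives in the project's headers.
struct mpiSimData {
  int nProcessors;   // size of MPI_COMM_WORLD
  int myNode;        // this rank's index (worldRank)
  int nAtomsGlobal,  nAtomsLocal;
  int nGroupsGlobal, nGroupsLocal;  // cutoff groups (new in this change)
  int nMolGlobal,    nMolLocal;
  int nBondsGlobal, nBendsGlobal, nTorsionsGlobal, nSRIGlobal;
};
```
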
# | Line 34 | Line 35 | mpiSimulation::~mpiSimulation(){ | |
35 | delete[] MolToProcMap; | |
36 | delete[] MolComponentType; | |
37 | delete[] AtomToProcMap; | |
38 | + | delete[] GroupToProcMap; |
39 | ||
40 | < | delete mpiPlug; |
40 | > | delete parallelData; |
41 | // perhaps we should let fortran know the party is over. | |
42 | ||
43 | } | |
# | Line 47 | Line 49 | void mpiSimulation::divideLabor( ){ | |
49 | randomSPRNG *myRandom; | |
50 | int* componentsNmol; | |
51 | int* AtomsPerProc; | |
52 | + | int* GroupsPerProc; |
53 | ||
54 | double numerator; | |
55 | double denominator; | |
56 | double precast; | |
57 | double x, y, a; | |
58 | int old_atoms, add_atoms, new_atoms; | |
59 | + | int old_groups, add_groups, new_groups; |
60 | ||
61 | int nTarget; | |
62 | < | int molIndex, atomIndex; |
62 | > | int molIndex, atomIndex, groupIndex; |
63 | int done; | |
64 | < | int i, j, loops, which_proc, nmol_local, natoms_local; |
65 | < | int nmol_global, natoms_global; |
64 | > | int i, j, loops, which_proc; |
65 | > | int nmol_global, nmol_local; |
66 | > | int ngroups_global, ngroups_local; |
67 | > | int natoms_global, natoms_local; |
68 | > | int ncutoff_groups, nAtomsInGroups; |
69 | int local_index; | |
70 | int baseSeed = entryPlug->getSeed(); | |
71 | + | CutoffGroupStamp* cg; |
72 | ||
73 | nComponents = entryPlug->nComponents; | |
74 | compStamps = entryPlug->compStamps; | |
75 | componentsNmol = entryPlug->componentsNmol; | |
76 | < | AtomsPerProc = new int[mpiPlug->nProcessors]; |
76 | > | AtomsPerProc = new int[parallelData->nProcessors]; |
77 | > | GroupsPerProc = new int[parallelData->nProcessors]; |
78 | ||
79 | < | mpiPlug->nAtomsGlobal = entryPlug->n_atoms; |
80 | < | mpiPlug->nBondsGlobal = entryPlug->n_bonds; |
81 | < | mpiPlug->nBendsGlobal = entryPlug->n_bends; |
82 | < | mpiPlug->nTorsionsGlobal = entryPlug->n_torsions; |
83 | < | mpiPlug->nSRIGlobal = entryPlug->n_SRI; |
84 | < | mpiPlug->nMolGlobal = entryPlug->n_mol; |
85 | < | mpiPlug->nGroupsGlobal = entryPlug->ngroup; |
79 | > | parallelData->nAtomsGlobal = entryPlug->n_atoms; |
80 | > | parallelData->nBondsGlobal = entryPlug->n_bonds; |
81 | > | parallelData->nBendsGlobal = entryPlug->n_bends; |
82 | > | parallelData->nTorsionsGlobal = entryPlug->n_torsions; |
83 | > | parallelData->nSRIGlobal = entryPlug->n_SRI; |
84 | > | parallelData->nGroupsGlobal = entryPlug->ngroup; |
85 | > | parallelData->nMolGlobal = entryPlug->n_mol; |
86 | ||
87 | myRandom = new randomSPRNG( baseSeed ); | |
88 | ||
89 | < | a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal; |
89 | > | a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal; |
90 | ||
91 | // Initialize things that we'll send out later: | |
92 | < | for (i = 0; i < mpiPlug->nProcessors; i++ ) { |
92 | > | for (i = 0; i < parallelData->nProcessors; i++ ) { |
93 | AtomsPerProc[i] = 0; | |
94 | + | GroupsPerProc[i] = 0; |
95 | } | |
96 | < | for (i = 0; i < mpiPlug->nMolGlobal; i++ ) { |
96 | > | for (i = 0; i < parallelData->nMolGlobal; i++ ) { |
97 | // default to an error condition: | |
98 | MolToProcMap[i] = -1; | |
99 | MolComponentType[i] = -1; | |
100 | } | |
101 | < | for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) { |
101 | > | for (i = 0; i < parallelData->nAtomsGlobal; i++ ) { |
102 | // default to an error condition: | |
103 | AtomToProcMap[i] = -1; | |
104 | } | |
105 | + | for (i = 0; i < parallelData->nGroupsGlobal; i++ ) { |
106 | + | // default to an error condition: |
107 | + | GroupToProcMap[i] = -1; |
108 | + | } |
109 | ||
110 | < | if (mpiPlug->myNode == 0) { |
110 | > | if (parallelData->myNode == 0) { |
111 | numerator = (double) entryPlug->n_atoms; | |
112 | < | denominator = (double) mpiPlug->nProcessors; |
112 | > | denominator = (double) parallelData->nProcessors; |
113 | precast = numerator / denominator; | |
114 | nTarget = (int)( precast + 0.5 ); | |
115 | ||
# | Line 109 | Line 123 | void mpiSimulation::divideLabor( ){ | |
123 | } | |
124 | ||
125 | atomIndex = 0; | |
126 | + | groupIndex = 0; |
127 | ||
128 | for (i = 0; i < molIndex; i++ ) { | |
129 | ||
# | Line 120 | Line 135 | void mpiSimulation::divideLabor( ){ | |
135 | ||
136 | // Pick a processor at random | |
137 | ||
138 | < | which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors); |
138 | > | which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors); |
139 | ||
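
The `which_proc` line above draws the candidate processor uniformly at random; assuming `getRandom()` returns a double in [0,1), the cast truncates the product onto {0, ..., nProcessors-1}. In isolation:

```cpp
// Sketch: map a uniform double u in [0,1) onto {0, ..., nProcessors-1}.
// Mirrors the which_proc line above, assuming getRandom() never
// returns a value >= 1.0.
int pickProcessor(double u, int nProcessors) {
  return (int)(u * nProcessors);  // truncation acts as floor for u >= 0
}
```
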
140 | // How many atoms does this processor have? | |
141 | ||
# | Line 128 | Line 143 | void mpiSimulation::divideLabor( ){ | |
143 | add_atoms = compStamps[MolComponentType[i]]->getNAtoms(); | |
144 | new_atoms = old_atoms + add_atoms; | |
145 | ||
146 | + | old_groups = GroupsPerProc[which_proc]; |
147 | + | ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups(); |
148 | + | nAtomsInGroups = 0; |
149 | + | for (j = 0; j < ncutoff_groups; j++) { |
150 | + | cg = compStamps[MolComponentType[i]]->getCutoffGroup(j); |
151 | + | nAtomsInGroups += cg->getNMembers(); |
152 | + | } |
153 | + | add_groups = add_atoms - nAtomsInGroups + ncutoff_groups; |
154 | + | new_groups = old_groups + add_groups; |
155 | + | |
156 | // If we've been through this loop too many times, we need | |
157 | // to just give up and assign the molecule to this processor | |
158 | // and be done with it. | |
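
The group bookkeeping added a few lines above treats every atom outside a declared cutoff group as a one-atom group of its own: of the `add_atoms` atoms in the molecule, `nAtomsInGroups` are covered by the `ncutoff_groups` declared groups, and the leftover atoms are singletons. Restated as a standalone sketch:

```cpp
// Sketch of the cutoff-group arithmetic above: atoms outside every
// declared group count as implicit one-atom groups.
int countGroups(int nAtoms, int nDeclaredGroups, int nAtomsInDeclaredGroups) {
  int singletons = nAtoms - nAtomsInDeclaredGroups;  // ungrouped atoms
  return singletons + nDeclaredGroups;
}
```
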
# | Line 147 | Line 172 | void mpiSimulation::divideLabor( ){ | |
172 | AtomToProcMap[atomIndex] = which_proc; | |
173 | atomIndex++; | |
174 | } | |
175 | + | GroupsPerProc[which_proc] += add_groups; |
176 | + | for (j=0; j < add_groups; j++) { |
177 | + | GroupToProcMap[groupIndex] = which_proc; |
178 | + | groupIndex++; |
179 | + | } |
180 | done = 1; | |
181 | continue; | |
182 | } | |
# | Line 161 | Line 191 | void mpiSimulation::divideLabor( ){ | |
191 | AtomToProcMap[atomIndex] = which_proc; | |
192 | atomIndex++; | |
193 | } | |
194 | + | GroupsPerProc[which_proc] += add_groups; |
195 | + | for (j=0; j < add_groups; j++) { |
196 | + | GroupToProcMap[groupIndex] = which_proc; |
197 | + | groupIndex++; |
198 | + | } |
199 | done = 1; | |
200 | continue; | |
201 | } | |
# | Line 184 | Line 219 | void mpiSimulation::divideLabor( ){ | |
219 | AtomToProcMap[atomIndex] = which_proc; | |
220 | atomIndex++; | |
221 | } | |
222 | + | GroupsPerProc[which_proc] += add_groups; |
223 | + | for (j=0; j < add_groups; j++) { |
224 | + | GroupToProcMap[groupIndex] = which_proc; |
225 | + | groupIndex++; |
226 | + | } |
227 | done = 1; | |
228 | continue; | |
229 | } else { | |
# | Line 193 | Line 233 | void mpiSimulation::divideLabor( ){ | |
233 | } | |
234 | } | |
235 | ||
236 | + | |
237 | // Spray out this nonsense to all other processors: | |
238 | ||
239 | < | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
239 | > | //std::cerr << "node 0 mol2proc = \n"; |
240 | > | //for (i = 0; i < parallelData->nMolGlobal; i++) |
241 | > | // std::cerr << i << "\t" << MolToProcMap[i] << "\n"; |
242 | > | |
243 | > | MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, |
244 | MPI_INT, 0, MPI_COMM_WORLD); | |
245 | ||
246 | < | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
246 | > | MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal, |
247 | MPI_INT, 0, MPI_COMM_WORLD); | |
248 | ||
249 | < | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal, |
249 | > | MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal, |
250 | MPI_INT, 0, MPI_COMM_WORLD); | |
251 | ||
252 | < | MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors, |
252 | > | MPI_Bcast(MolComponentType, parallelData->nMolGlobal, |
253 | > | MPI_INT, 0, MPI_COMM_WORLD); |
254 | > | |
255 | > | MPI_Bcast(AtomsPerProc, parallelData->nProcessors, |
256 | MPI_INT, 0, MPI_COMM_WORLD); | |
257 | + | |
258 | + | MPI_Bcast(GroupsPerProc, parallelData->nProcessors, |
259 | + | MPI_INT, 0, MPI_COMM_WORLD); |
260 | } else { | |
261 | ||
262 | // Listen to your marching orders from processor 0: | |
263 | ||
264 | < | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
264 | > | MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, |
265 | MPI_INT, 0, MPI_COMM_WORLD); | |
266 | ||
267 | < | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
267 | > | MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal, |
268 | MPI_INT, 0, MPI_COMM_WORLD); | |
269 | ||
270 | < | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal, |
270 | > | MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal, |
271 | > | MPI_INT, 0, MPI_COMM_WORLD); |
272 | > | |
273 | > | MPI_Bcast(MolComponentType, parallelData->nMolGlobal, |
274 | MPI_INT, 0, MPI_COMM_WORLD); | |
275 | ||
276 | < | MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors, |
276 | > | MPI_Bcast(AtomsPerProc, parallelData->nProcessors, |
277 | MPI_INT, 0, MPI_COMM_WORLD); | |
278 | ||
279 | + | MPI_Bcast(GroupsPerProc, parallelData->nProcessors, |
280 | + | MPI_INT, 0, MPI_COMM_WORLD); |
281 | ||
282 | + | |
283 | } | |
284 | ||
228 | – | |
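
Both branches above issue the same `MPI_Bcast` sequence because the call is collective: rank 0 and every other rank must make matching calls, and rank 0's buffers act as the source. A minimal sketch of one such broadcast:

```cpp
#include <mpi.h>

// Sketch: MPI_Bcast is collective. Every rank calls it with the same
// count and root; the buffer travels from rank 0 to everyone else.
void broadcastMap(int* map, int count) {
  MPI_Bcast(map, count, MPI_INT, 0, MPI_COMM_WORLD);
}
```
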
285 | // Let's all check for sanity: | |
286 | ||
287 | nmol_local = 0; | |
288 | < | for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) { |
289 | < | if (MolToProcMap[i] == mpiPlug->myNode) { |
288 | > | for (i = 0 ; i < parallelData->nMolGlobal; i++ ) { |
289 | > | if (MolToProcMap[i] == parallelData->myNode) { |
290 | nmol_local++; | |
291 | } | |
292 | } | |
293 | ||
294 | natoms_local = 0; | |
295 | < | for (i = 0; i < mpiPlug->nAtomsGlobal; i++) { |
296 | < | if (AtomToProcMap[i] == mpiPlug->myNode) { |
295 | > | for (i = 0; i < parallelData->nAtomsGlobal; i++) { |
296 | > | if (AtomToProcMap[i] == parallelData->myNode) { |
297 | natoms_local++; | |
298 | } | |
299 | } | |
300 | ||
301 | + | ngroups_local = 0; |
302 | + | for (i = 0; i < parallelData->nGroupsGlobal; i++) { |
303 | + | if (GroupToProcMap[i] == parallelData->myNode) { |
304 | + | ngroups_local++; |
305 | + | } |
306 | + | } |
307 | + | |
308 | MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM, | |
309 | MPI_COMM_WORLD); | |
310 | + | |
311 | MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT, | |
312 | MPI_SUM, MPI_COMM_WORLD); | |
313 | + | |
314 | + | MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT, |
315 | + | MPI_SUM, MPI_COMM_WORLD); |
316 | ||
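
The three `MPI_Allreduce` calls above and the checks that follow share one pattern: sum a per-rank count across all processors and compare it to the known global total. A sketch of that reduction as a helper (the name `sumOverRanks` is ours, not the code's):

```cpp
#include <mpi.h>

// Sketch: sum one local count over all ranks; every rank receives
// the same global total.
int sumOverRanks(int localCount) {
  int globalCount = 0;
  MPI_Allreduce(&localCount, &globalCount, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  return globalCount;
}
```
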
317 | if( nmol_global != entryPlug->n_mol ){ | |
318 | sprintf( painCave.errMsg, | |
# | Line 265 | Line 332 | void mpiSimulation::divideLabor( ){ | |
332 | simError(); | |
333 | } | |
334 | ||
335 | + | if( ngroups_global != entryPlug->ngroup ){ |
336 | + | sprintf( painCave.errMsg, |
337 | + | "The sum of all ngroups_local, %d, did not equal the " |
338 | + | "total number of cutoffGroups, %d.\n", |
339 | + | ngroups_global, entryPlug->ngroup ); |
340 | + | painCave.isFatal = 1; |
341 | + | simError(); |
342 | + | } |
343 | + | |
344 | sprintf( checkPointMsg, | |
345 | "Successfully divided the molecules among the processors.\n" ); | |
346 | MPIcheckPoint(); | |
347 | ||
348 | < | mpiPlug->nMolLocal = nmol_local; |
349 | < | mpiPlug->nAtomsLocal = natoms_local; |
348 | > | parallelData->nMolLocal = nmol_local; |
349 | > | parallelData->nAtomsLocal = natoms_local; |
350 | > | parallelData->nGroupsLocal = ngroups_local; |
351 | ||
352 | < | globalAtomIndex.resize(mpiPlug->nAtomsLocal); |
353 | < | globalToLocalAtom.resize(mpiPlug->nAtomsGlobal); |
352 | > | globalAtomIndex.resize(parallelData->nAtomsLocal); |
353 | > | globalToLocalAtom.resize(parallelData->nAtomsGlobal); |
354 | local_index = 0; | |
355 | < | for (i = 0; i < mpiPlug->nAtomsGlobal; i++) { |
356 | < | if (AtomToProcMap[i] == mpiPlug->myNode) { |
355 | > | for (i = 0; i < parallelData->nAtomsGlobal; i++) { |
356 | > | if (AtomToProcMap[i] == parallelData->myNode) { |
357 | globalAtomIndex[local_index] = i; | |
358 | ||
359 | globalToLocalAtom[i] = local_index; | |
# | Line 287 | Line 364 | void mpiSimulation::divideLabor( ){ | |
364 | globalToLocalAtom[i] = -1; | |
365 | } | |
366 | ||
367 | < | globalMolIndex.resize(mpiPlug->nMolLocal); |
368 | < | globalToLocalMol.resize(mpiPlug->nMolGlobal); |
292 | < | |
367 | > | globalGroupIndex.resize(parallelData->nGroupsLocal); |
368 | > | globalToLocalGroup.resize(parallelData->nGroupsGlobal); |
369 | local_index = 0; | |
370 | < | for (i = 0; i < mpiPlug->nMolGlobal; i++) { |
371 | < | if (MolToProcMap[i] == mpiPlug->myNode) { |
370 | > | for (i = 0; i < parallelData->nGroupsGlobal; i++) { |
371 | > | if (GroupToProcMap[i] == parallelData->myNode) { |
372 | > | globalGroupIndex[local_index] = i; |
373 | > | |
374 | > | globalToLocalGroup[i] = local_index; |
375 | > | local_index++; |
376 | > | |
377 | > | } |
378 | > | else |
379 | > | globalToLocalGroup[i] = -1; |
380 | > | } |
381 | > | |
382 | > | globalMolIndex.resize(parallelData->nMolLocal); |
383 | > | globalToLocalMol.resize(parallelData->nMolGlobal); |
384 | > | local_index = 0; |
385 | > | for (i = 0; i < parallelData->nMolGlobal; i++) { |
386 | > | if (MolToProcMap[i] == parallelData->myNode) { |
387 | globalMolIndex[local_index] = i; | |
388 | globalToLocalMol[i] = local_index; | |
389 | local_index++; | |
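
The resize-and-fill passes above build the same bidirectional mapping for atoms, cutoff groups, and molecules: a dense local-to-global list plus a sparse global-to-local array in which -1 marks objects owned by another processor. A generic sketch of the pattern (names are illustrative):

```cpp
#include <vector>

// Sketch of the bidirectional index maps built above; ownerOf plays
// the role of the *ToProcMap arrays.
void buildIndexMaps(const int* ownerOf, int nGlobal, int myNode,
                    std::vector<int>& localToGlobal,
                    std::vector<int>& globalToLocal) {
  globalToLocal.assign(nGlobal, -1);   // -1: owned by another processor
  localToGlobal.clear();
  for (int i = 0; i < nGlobal; i++) {
    if (ownerOf[i] == myNode) {
      globalToLocal[i] = (int)localToGlobal.size();
      localToGlobal.push_back(i);      // dense list of my global indices
    }
  }
}
```
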
# | Line 307 | Line 398 | void mpiSimulation::mpiRefresh( void ){ | |
398 | void mpiSimulation::mpiRefresh( void ){ | |
399 | ||
400 | int isError, i; | |
401 | < | int *globalIndex = new int[mpiPlug->nAtomsLocal]; |
401 | > | int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal]; |
402 | > | int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal]; |
403 | ||
404 | < | // Fortran indexing needs to be increased by 1 in order to get the 2 languages to |
405 | < | // not barf |
404 | > | // Fortran indexing needs to be increased by 1 in order to get the 2 |
405 | > | // languages to not barf |
406 | ||
407 | < | for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1; |
407 | > | for(i = 0; i < parallelData->nAtomsLocal; i++) |
408 | > | localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1; |
409 | ||
410 | + | for(i = 0; i < parallelData->nGroupsLocal; i++) |
411 | + | localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1; |
412 | ||
413 | isError = 0; | |
414 | < | setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError ); |
414 | > | |
415 | > | setFsimParallel( parallelData, |
416 | > | &(parallelData->nAtomsLocal), localToGlobalAtomIndex, |
417 | > | &(parallelData->nGroupsLocal), localToGlobalGroupIndex, |
418 | > | &isError ); |
419 | > | |
420 | if( isError ){ | |
421 | ||
422 | sprintf( painCave.errMsg, | |
# | Line 325 | Line 425 | void mpiSimulation::mpiRefresh( void ){ | |
425 | simError(); | |
426 | } | |
427 | ||
428 | < | delete[] globalIndex; |
428 | > | delete[] localToGlobalGroupIndex; |
429 | > | delete[] localToGlobalAtomIndex; |
430 | ||
431 | + | |
432 | sprintf( checkPointMsg, | |
433 | " mpiRefresh successful.\n" ); | |
434 | MPIcheckPoint(); |
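
The `+ 1` in the two copy loops above exists because the Fortran side indexes arrays from 1 while the C++ side indexes from 0, so every index that crosses the language boundary must be shifted. The conversion in isolation:

```cpp
// Sketch: shift 0-based C++ indices to 1-based Fortran indices before
// handing an index array across the language boundary.
void toFortranIndexing(const int* cxxIndex, int* fortranIndex, int n) {
  for (int i = 0; i < n; i++)
    fortranIndex[i] = cxxIndex[i] + 1;  // Fortran arrays start at 1
}
```
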
– | Removed lines |
+ | Added lines |
< | Changed lines |
> | Changed lines |