root/group/trunk/OOPSE-4/src/brains/mpiSimulation.cpp
Revision: 1492
Committed: Fri Sep 24 16:27:58 2004 UTC by tim
File size: 13058 byte(s)
Log Message:
change the #include in source files

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "brains/mpiSimulation.hpp"
#include "utils/simError.h"
#include "UseTheForce/fortranWrappers.hpp"
#include "math/randomSPRNG.hpp"

mpiSimulation* mpiSim;

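// The constructor caches the SimInfo handle, queries MPI for the number of
// processors, and allocates the global molecule/atom/group -> processor maps
// (sized by the global counts in entryPlug). It also publishes itself through
// the global mpiSim pointer and hands this object to the Fortran wrapper
// layer via wrapMeSimParallel().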
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];
  GroupToProcMap = new int[entryPlug->ngroup];

  mpiSim = this;
  wrapMeSimParallel( this );
}

mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;
  delete[] GroupToProcMap;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

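// divideLabor() assigns every molecule (and, with it, its atoms and cutoff
// groups) to a processor. Node 0 performs a randomized assignment that tries
// to keep each processor near the average atom count, then broadcasts the
// resulting maps; every node then counts what it owns, cross-checks the
// totals against the global counts, and builds its local <-> global index
// tables.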
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nBondsGlobal = entryPlug->n_bonds;
  parallelData->nBendsGlobal = entryPlug->n_bends;
  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  if (parallelData->nProcessors > parallelData->nMolGlobal) {
    sprintf( painCave.errMsg,
             "nProcessors (%d) > nMol (%d)\n"
             "\tThe number of processors is larger than\n"
             "\tthe number of molecules. This will not result in a \n"
             "\tusable division of atoms for force decomposition.\n"
             "\tEither try a smaller number of processors, or run the\n"
             "\tsingle-processor version of OOPSE.\n",
             parallelData->nProcessors, parallelData->nMolGlobal );
    painCave.isFatal = 1;
    simError();
  }

  myRandom = new randomSPRNG( baseSeed );

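  // Decay constant for the acceptance probability used in the assignment
  // loop below: a penalty factor of 3.0 divided by the average number of
  // atoms per molecule (nAtomsGlobal / nMolGlobal).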
  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }
  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
    // default to an error condition:
    GroupToProcMap[i] = -1;
  }

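  // Node 0 does the actual assignment. nTarget is the average number of
  // atoms per processor (rounded to the nearest integer). For each molecule
  // we pick a processor at random and accept the assignment outright if it
  // keeps that processor at or below nTarget; otherwise we accept with
  // probability exp(-a * (new_atoms - nTarget)). After 100 failed attempts
  // the molecule is assigned to the last processor tried.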
  if (parallelData->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;
    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        old_groups = GroupsPerProc[which_proc];
        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
        nAtomsInGroups = 0;
        for (j = 0; j < ncutoff_groups; j++) {
          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
          nAtomsInGroups += cg->getNMembers();
        }
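        // Any atom not listed in an explicit cutoff group is treated as a
        // group of its own, so this molecule contributes its "loose" atoms
        // plus its declared cutoff groups.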
        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
        new_groups = old_groups + add_groups;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

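    // Node 0 broadcasts the finished maps below; the matching MPI_Bcast
    // calls in the else branch let every other rank receive them, so all
    // ranks end up with identical copies of the assignment maps and
    // per-processor counts.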
    // Spray out this nonsense to all other processors:

    //std::cerr << "node 0 mol2proc = \n";
    //for (i = 0; i < parallelData->nMolGlobal; i++)
    //  std::cerr << i << "\t" << MolToProcMap[i] << "\n";

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      natoms_local++;
    }
  }

  ngroups_local = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      ngroups_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ngroups_local, &ngroups_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  if( ngroups_global != entryPlug->ngroup ){
    sprintf( painCave.errMsg,
             "The sum of all ngroups_local, %d, did not equal the "
             "total number of cutoffGroups, %d.\n",
             ngroups_global, entryPlug->ngroup );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

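  // Build the local <-> global index tables. globalAtomIndex maps a local
  // atom index to its global index, while globalToLocalAtom does the reverse
  // and holds -1 for atoms that live on other processors. The same pattern
  // is repeated for cutoff groups and molecules.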
  globalAtomIndex.resize(parallelData->nAtomsLocal);
  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalGroupIndex.resize(parallelData->nGroupsLocal);
  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      globalGroupIndex[local_index] = i;
      globalToLocalGroup[i] = local_index;
      local_index++;
    }
    else
      globalToLocalGroup[i] = -1;
  }

  globalMolIndex.resize(parallelData->nMolLocal);
  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

  // These were allocated locally for the assignment step only; free them
  // so divideLabor() does not leak.
  delete[] AtomsPerProc;
  delete[] GroupsPerProc;
  delete myRandom;

}

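// mpiRefresh() pushes the current decomposition down to the Fortran force
// layer: it builds 1-based copies of the local-to-global atom and group
// index arrays (Fortran arrays start at 1) and hands them to
// setFsimParallel() along with the parallelData block.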
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the 2
  // languages to not barf

  for(i = 0; i < parallelData->nAtomsLocal; i++)
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for(i = 0; i < parallelData->nGroupsLocal; i++)
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;

  isError = 0;

  setFsimParallel( parallelData,
                   &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
                   &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
                   &isError );

  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] localToGlobalGroupIndex;
  delete[] localToGlobalAtomIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}

#endif // IS_MPI