ViewVC Help
View File | Revision Log | Show Annotations | View Changeset | Root Listing
root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 416
Committed: Wed Mar 26 23:14:02 2003 UTC (21 years, 3 months ago) by gezelter
File size: 8576 byte(s)
Log Message:
Bug fixes: many bug fixes.

File Contents

# Content
1 #ifdef IS_MPI
2
3 #include <cstdlib>
4 #include <cstring>
5 #include <cmath>
6 #include <mpi.h>
7 #include <mpi++.h>
8
9 #include "mpiSimulation.hpp"
10 #include "simError.h"
11 #include "fortranWrappers.hpp"
12 #include "randomSPRNG.hpp"
13
14 #define BASE_SEED 123456789
15
16 mpiSimulation* mpiSim;
17
18 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
19 {
20 entryPlug = the_entryPlug;
21 mpiPlug = new mpiSimData;
22
23 mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
24 mpiPlug->myNode = worldRank;
25
26 MolToProcMap = new int[entryPlug->n_mol];
27 MolComponentType = new int[entryPlug->n_mol];
28
29 AtomToProcMap = new int[entryPlug->n_atoms];
30
31 mpiSim = this;
32 wrapMeSimParallel( this );
33 }
34
35
36 mpiSimulation::~mpiSimulation(){
37
38 delete mpiPlug;
39 // perhaps we should let fortran know the party is over.
40
41 }
42
43 int* mpiSimulation::divideLabor( void ){
44
45 int* globalIndex;
46
47 int nComponents;
48 MoleculeStamp** compStamps;
49 randomSPRNG *myRandom;
50 int* componentsNmol;
51 int* AtomsPerProc;
52
53 double numerator;
54 double denominator;
55 double precast;
56 double x, y, a;
57 int old_atoms, add_atoms, new_atoms;
58
59 int nTarget;
60 int molIndex, atomIndex, compIndex, compStart;
61 int done;
62 int nLocal, molLocal;
63 int i, j, loops, which_proc, nmol_local, natoms_local;
64 int nmol_global, natoms_global;
65 int local_index, index;
66 int smallDiff, bigDiff;
67 int baseSeed = BASE_SEED;
68
69 int testSum;
70
71 nComponents = entryPlug->nComponents;
72 compStamps = entryPlug->compStamps;
73 componentsNmol = entryPlug->componentsNmol;
74 AtomsPerProc = new int[mpiPlug->numberProcessors];
75
76 mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
77 mpiPlug->nBondsGlobal = entryPlug->n_bonds;
78 mpiPlug->nBendsGlobal = entryPlug->n_bends;
79 mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
80 mpiPlug->nSRIGlobal = entryPlug->n_SRI;
81 mpiPlug->nMolGlobal = entryPlug->n_mol;
82
83 myRandom = new randomSPRNG( baseSeed );
84
85 a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
86
87 // Initialize things that we'll send out later:
88 for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
89 AtomsPerProc[i] = 0;
90 }
91 for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
92 // default to an error condition:
93 MolToProcMap[i] = -1;
94 MolComponentType[i] = -1;
95 }
96 for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
97 // default to an error condition:
98 AtomToProcMap[i] = -1;
99 }
100
101 if (mpiPlug->myNode == 0) {
102 numerator = (double) entryPlug->n_atoms;
103 denominator = (double) mpiPlug->numberProcessors;
104 precast = numerator / denominator;
105 nTarget = (int)( precast + 0.5 );
106
107 // Build the array of molecule component types first
108 molIndex = 0;
109 for (i=0; i < nComponents; i++) {
110 for (j=0; j < componentsNmol[i]; j++) {
111 MolComponentType[molIndex] = i;
112 molIndex++;
113 }
114 }
115
116 atomIndex = 0;
117
118 for (i = 0; i < molIndex; i++ ) {
119
120 done = 0;
121 loops = 0;
122
123 while( !done ){
124 loops++;
125
126 // Pick a processor at random
127
128 which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
129
130 // How many atoms does this processor have?
131
132 old_atoms = AtomsPerProc[which_proc];
133
134 // If the processor already had too many atoms, just skip this
135 // processor and try again.
136
137 if (old_atoms >= nTarget) continue;
138
139 add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
140 new_atoms = old_atoms + add_atoms;
141
142 // If we can add this molecule to this processor without sending
143 // it above nTarget, then go ahead and do it:
144
145 if (new_atoms <= nTarget) {
146 MolToProcMap[i] = which_proc;
147 AtomsPerProc[which_proc] += add_atoms;
148 for (j = 0 ; j < add_atoms; j++ ) {
149 atomIndex++;
150 AtomToProcMap[atomIndex] = which_proc;
151 }
152 done = 1;
153 continue;
154 }
155
156 // If we've been through this loop too many times, we need
157 // to just give up and assign the molecule to this processor
158 // and be done with it.
159
160 if (loops > 100) {
161 sprintf( painCave.errMsg,
162 "I've tried 100 times to assign molecule %d to a "
163 " processor, but can't find a good spot.\n"
164 "I'm assigning it at random to processor %d.\n",
165 i, which_proc);
166 painCave.isFatal = 0;
167 simError();
168
169 MolToProcMap[i] = which_proc;
170 AtomsPerProc[which_proc] += add_atoms;
171 for (j = 0 ; j < add_atoms; j++ ) {
172 atomIndex++;
173 AtomToProcMap[atomIndex] = which_proc;
174 }
175 done = 1;
176 continue;
177 }
178
179 // The only situation left is where old_atoms < nTarget, but
180 // new_atoms > nTarget. We want to accept this with some
181 // probability that dies off the farther we are from nTarget
182
183 // roughly: x = new_atoms - nTarget
184 // Pacc(x) = exp(- a * x)
185 // where a = 1 / (average atoms per molecule)
186
187 x = (double) (new_atoms - nTarget);
188 y = myRandom->getRandom();
189
190 if (exp(- a * x) > y) {
191 MolToProcMap[i] = which_proc;
192 AtomsPerProc[which_proc] += add_atoms;
193 for (j = 0 ; j < add_atoms; j++ ) {
194 atomIndex++;
195 AtomToProcMap[atomIndex] = which_proc;
196 }
197 done = 1;
198 continue;
199 } else {
200 continue;
201 }
202
203 }
204 }
205
206 // Spray out this nonsense to all other processors:
207
208 MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
209 MPI_INT, 0);
210
211 MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
212 MPI_INT, 0);
213
214 MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
215 MPI_INT, 0);
216
217 MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
218 MPI_INT, 0);
219 } else {
220
221 // Listen to your marching orders from processor 0:
222
223 MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
224 MPI_INT, 0);
225
226 MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
227 MPI_INT, 0);
228
229 MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
230 MPI_INT, 0);
231
232 MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
233 MPI_INT, 0);
234 }
235
236
237 // Let's all check for sanity:
238
239 nmol_local = 0;
240 for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
241 if (MolToProcMap[i] == mpiPlug->myNode) {
242 nmol_local++;
243 }
244 }
245
246 natoms_local = 0;
247 for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
248 if (AtomToProcMap[i] == mpiPlug->myNode) {
249 natoms_local++;
250 }
251 }
252
253 MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
254 MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
255
256 if( nmol_global != entryPlug->n_mol ){
257 sprintf( painCave.errMsg,
258 "The sum of all nmol_local, %d, did not equal the "
259 "total number of molecules, %d.\n",
260 nmol_global, entryPlug->n_mol );
261 painCave.isFatal = 1;
262 simError();
263 }
264
265 if( natoms_global != entryPlug->n_atoms ){
266 sprintf( painCave.errMsg,
267 "The sum of all natoms_local, %d, did not equal the "
268 "total number of atoms, %d.\n",
269 natoms_global, entryPlug->n_atoms );
270 painCave.isFatal = 1;
271 simError();
272 }
273
274 sprintf( checkPointMsg,
275 "Successfully divided the molecules among the processors.\n" );
276 MPIcheckPoint();
277
278 mpiPlug->myNMol = nmol_local;
279 mpiPlug->myNlocal = natoms_local;
280
281 globalIndex = new int[mpiPlug->myNlocal];
282 local_index = 0;
283 for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
284 if (AtomToProcMap[i] == mpiPlug->myNode) {
285 local_index++;
286 globalIndex[local_index] = i;
287 }
288 }
289
290 return globalIndex;
291 }
292
293
294 void mpiSimulation::mpiRefresh( void ){
295
296 int isError, i;
297 int *globalIndex = new int[mpiPlug->myNlocal];
298
299 for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
300
301
302 isError = 0;
303 setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
304 if( isError ){
305
306 sprintf( painCave.errMsg,
307 "mpiRefresh errror: fortran didn't like something we gave it.\n" );
308 painCave.isFatal = 1;
309 simError();
310 }
311
312 delete[] globalIndex;
313
314 sprintf( checkPointMsg,
315 " mpiRefresh successful.\n" );
316 MPIcheckPoint();
317 }
318
319
320 #endif // is_mpi