root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 418
Committed: Thu Mar 27 14:30:24 2003 UTC by mmeineke
File size: 8649 byte(s)
Log Message:
little bug fixes here and there.

File Contents

#ifdef IS_MPI

#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>
#include <mpi++.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

#define BASE_SEED 123456789

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];

  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}
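
// divideLabor() assigns each molecule (and its atoms) to a processor,
// trying to balance the number of atoms per processor. Placement is
// random, with a Monte-Carlo-style acceptance test for candidates that
// would overshoot the per-processor target (see below). Returns the
// array of global atom indices owned by this processor.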

int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index, index;
  int smallDiff, bigDiff;
  int baseSeed = BASE_SEED;

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
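  // Here a = nMolGlobal / nAtomsGlobal is the reciprocal of the
  // average number of atoms per molecule; it sets the decay rate of
  // the acceptance probability used in the placement loop below.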

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
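    // nTarget is the ideal number of atoms per processor, rounded to
    // the nearest integer: e.g. 10000 atoms on 8 processors gives a
    // target of 1250 atoms each.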

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];

        // If the processor already had too many atoms, just skip this
        // processor and try again.

        if (old_atoms >= nTarget) continue;

        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            // map the atom before advancing the counter; incrementing
            // first would leave AtomToProcMap[0] unset and overrun the
            // end of the array:
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // The only situation left is where old_atoms < nTarget, but
        // new_atoms > nTarget. We want to accept this with some
        // probability that dies off the farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = 1 / (average atoms per molecule)
        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (exp(- a * x) > y) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                          MPI_INT, 0);
  } else {

    // Listen to your marching orders from processor 0:

    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
                          MPI_INT, 0);

    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
                          MPI_INT, 0);
  }
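
  // Note: Bcast is a collective operation, which is why the same four
  // calls appear in both branches; rank 0 is the root and every other
  // rank receives with matching arguments.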


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

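  // Allreduce sums the local counts across all ranks, so every
  // processor can verify the global totals independently.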
  MPI::COMM_WORLD.Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM);
  MPI::COMM_WORLD.Allreduce(&natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      // store the global index first, then advance; incrementing
      // before the store would skip globalIndex[0] and run one past
      // the end of the array:
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}


void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  for(i = 0; i < mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();

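  // Hand the refreshed local-to-global index mapping (and the rest of
  // mpiPlug) over to the fortran side of the simulation.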
  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           "mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi