root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 726
Committed: Tue Aug 26 20:37:30 2003 UTC by tim
File size: 8426 bytes
Log Message:
set default force subtraction policy to PolicyByMass

File Contents

#ifdef IS_MPI
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

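// Global handle on the (single) parallel simulation object; the fortran
// wrapper layer reaches the C++ side through this pointer.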
mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

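  // Register this object globally and hand it to the fortran wrapper
  // layer so the fortran side can call back into the parallel C++ code.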
  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

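// divideLabor() hands every molecule (and each of its atoms) to a
// processor.  Node 0 picks processors at random, accepting or rejecting
// each choice so that per-processor atom counts stay near the ideal
// nAtomsGlobal / numberProcessors, then broadcasts the resulting maps.
// The caller gets back this node's list of global atom indices.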
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;
  int baseSeed = entryPlug->getSeed();

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;


  myRandom = new randomSPRNG( baseSeed );

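  // Falloff parameter for the acceptance test in the assignment loop:
  // a = penalty / (average atoms per molecule), with a penalty of 3.0
  // (see the Pacc(x) comment inside the loop below).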
  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
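
    // nTarget is the ideal number of atoms per processor: the global
    // atom count divided by the number of processors, rounded to the
    // nearest integer.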

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }


        // The only situation left is when new_atoms > nTarget.  We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget:
        //
        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

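        // For example: with an average of ~3 atoms per molecule the
        // penalty of 3.0 gives a = 1.0, so a molecule that overshoots
        // nTarget by 5 atoms is accepted with probability exp(-5),
        // i.e. less than 1% of the time.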
        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
        }
        // otherwise, reject and draw a new processor on the next pass

      }
    }

  }

  // Spray out this nonsense to all other processors.  MPI_Bcast is a
  // collective call, so node 0 and the listeners make the same four
  // calls; there is no need to duplicate them in both branches:

  MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
            MPI_INT, 0, MPI_COMM_WORLD);

  MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
            MPI_INT, 0, MPI_COMM_WORLD);

  MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
            MPI_INT, 0, MPI_COMM_WORLD);

  MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
            MPI_INT, 0, MPI_COMM_WORLD);


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

  // Collect the global indices of the atoms that live on this node:
  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  // Free the scratch space; the caller takes ownership of globalIndex.
  delete myRandom;
  delete[] AtomsPerProc;

  return globalIndex;
}


void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  // Fortran arrays are 1-based, so each 0-based global index has to be
  // shifted up by one before it crosses the language boundary:

  for(i = 0; i < mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex() + 1;


  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi
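
A note on usage, based only on the functions defined above: a driver would construct the mpiSimulation, call divideLabor() to learn which atoms this node owns, and call mpiRefresh() once the local atoms exist. The fragment below is a minimal sketch under those assumptions; setupParallelSim and simnfo are hypothetical names, not part of this file or of OOPSE's actual startup code.

#ifdef IS_MPI
// Hypothetical driver fragment (illustrative only):
void setupParallelSim( SimInfo* simnfo ){
  mpiSimulation* parSim = new mpiSimulation( simnfo );

  // Assign molecules and atoms to processors; we get back this node's
  // list of global atom indices.
  int* globalIndex = parSim->divideLabor();

  // ... build the local atom arrays from globalIndex ...

  // Push the (1-based) index list down to the fortran layer.
  parSim->mpiRefresh();

  delete[] globalIndex;
}
#endif // IS_MPI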