root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 708
Committed: Wed Aug 20 22:23:34 2003 UTC by tim
File size: 8455 byte(s)
Log Message:
The user can now set the seed in the BASS file; if no seed value is
specified, OOPSE falls back to the system time, in seconds, as the seed.
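
In rough outline, the fallback described above behaves like the following
sketch. This is a hedged illustration, not the actual OOPSE parsing code:
chooseSeed and the convention that 0 means "not specified" are assumptions
made here for clarity.

    #include <ctime>

    // Sketch of the seed selection described in the log message.
    // bassSeed is the value read from the BASS file; 0 stands in for
    // "not specified" (an assumption, not the real parser's convention).
    static int chooseSeed(int bassSeed) {
      if (bassSeed != 0)
        return bassSeed;         // user-supplied seed from the BASS file
      return (int)time(NULL);    // otherwise, seconds of system time
    }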

File Contents

#ifdef IS_MPI
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <cmath>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

#define BASE_SEED 123456789

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index, index;
  int smallDiff, bigDiff;
  int baseSeed = entryPlug->getSeed();

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;


  myRandom = new randomSPRNG( baseSeed );

  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }


        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);


  }
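  // Note: MPI_Bcast is collective and is called with identical arguments
  // on the root and the receiving ranks, so the four broadcasts above
  // could equally well be written once, outside the if/else.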


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}


void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  // Fortran arrays are 1-based, so shift each C++ index up by 1
  // before handing it across the language boundary.

  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;


  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI
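
For readers who want to experiment with the assignment heuristic in
divideLabor outside of MPI and SPRNG, here is a minimal single-process
sketch of the same accept/reject scheme. It is an illustration under
stated assumptions, not part of OOPSE: std::mt19937 stands in for the
SPRNG generator, and the processor count and molecule sizes are made up.

    #include <cmath>
    #include <cstdio>
    #include <random>
    #include <vector>

    // Stand-alone illustration of the divideLabor heuristic: throw each
    // molecule at a random processor, accept outright if the processor
    // stays at or below the target atom count, and accept an overload of
    // x atoms with probability exp(-a * x).
    int main() {
      const int nProcs = 4;
      const std::vector<int> molAtoms = {3, 3, 5, 2, 4, 3, 3, 5, 2, 4};

      int nAtoms = 0;
      for (int n : molAtoms) nAtoms += n;

      const int nTarget = (int)((double)nAtoms / (double)nProcs + 0.5);
      const double a = 3.0 * (double)molAtoms.size() / (double)nAtoms;

      std::mt19937 gen(123456789);              // stands in for SPRNG
      std::uniform_real_distribution<double> uniform(0.0, 1.0);

      std::vector<int> atomsPerProc(nProcs, 0);

      for (std::size_t i = 0; i < molAtoms.size(); i++) {
        for (int loops = 1; ; loops++) {
          int which = (int)(uniform(gen) * nProcs);
          int newAtoms = atomsPerProc[which] + molAtoms[i];

          // Accept if under target, if the exponential coin flip says
          // so, or if we have been rejected too many times (the
          // original gives up after 100 tries).
          if (newAtoms <= nTarget || loops > 100 ||
              uniform(gen) < std::exp(-a * (newAtoms - nTarget))) {
            atomsPerProc[which] += molAtoms[i];
            break;
          }
        }
      }

      for (int p = 0; p < nProcs; p++)
        std::printf("processor %d: %d atoms (target %d)\n",
                    p, atomsPerProc[p], nTarget);
      return 0;
    }

Because the acceptance probability dies off exponentially, processors
rarely end up more than a few atoms over nTarget, while the loop cap
keeps degenerate cases (for example, a molecule larger than nTarget
itself) from spinning forever.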