root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 989
Committed: Tue Jan 27 19:38:27 2004 UTC by gezelter
File size: 8334 byte(s)
Log Message:
More BASS changes to do new rigidBody scheme
a copy of WATER.cpp from this morning

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

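// the single global instance pointer; set to `this` by the constructor below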
mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
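  // register this object with the fortran wrapper layer (fortranWrappers.hpp)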
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

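// divideLabor: run on every node. Node 0 assigns each molecule (and its
// atoms) to a randomly chosen processor, biasing acceptance so atom counts
// stay near the per-processor target, then broadcasts the resulting maps.
// Returns this node's list of global atom indices.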
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;
  int baseSeed = entryPlug->getSeed();

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;


  myRandom = new randomSPRNG( baseSeed );

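  // decay constant for the acceptance test below:
  // a = penalty / (average atoms per molecule), with a penalty factor of
  // 3.0 and an average of nAtomsGlobal / nMolGlobal atoms per molecule.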
  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
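    // nTarget is the ideal load: the total number of atoms divided by the
    // number of processors, rounded to the nearest integer.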
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }


        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
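        // e.g. if molecules average 10 atoms, a = 3.0/10 = 0.3, so a
        // placement overshooting nTarget by 10 atoms survives the test
        // below with probability exp(-3), about 5%.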

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:
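    // (MPI_Bcast is collective: every rank must make the same four calls
    // with matching arguments, which is why the else-branch mirrors these.)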

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

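  // sum the per-rank counts so every rank can verify the global totals: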
  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

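  // record the global index of each atom this node owns (0-based here;
  // mpiRefresh shifts these to fortran's 1-based indexing):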
  globalIndex = new int[mpiPlug->myNlocal];
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalIndex[local_index] = i;
      local_index++;
    }
  }

  return globalIndex;
}


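// mpiRefresh: rebuild the global index list and push it (along with the
// rest of mpiPlug) down to the fortran side of the simulation.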
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  // Fortran indexing is 1-based, so each index is increased by 1 to keep
  // the two languages from barfing at each other.

  for(i=0; i<mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;


  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI