root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 1108
Committed: Wed Apr 14 15:37:41 2004 UTC (20 years, 2 months ago) by tim
File size: 8687 byte(s)
Log Message:
Change DumpWriter and InitFromFile

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
  mpiPlug->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

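// Assign molecules to processors and build the bookkeeping maps.
// Node 0 picks a processor at random for each molecule, keeping the pick
// whenever the molecule stays at or below the per-processor atom target
// (nTarget), and otherwise accepting the overload with a probability that
// decays exponentially in the number of excess atoms. The resulting maps
// are then broadcast to every node, sanity checked, and used to build the
// local <-> global index tables.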
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;

  int nTarget;
  int molIndex, atomIndex;
  int done;
  int i, j, loops, which_proc, nmol_local, natoms_local;
  int nmol_global, natoms_global;
  int local_index;
  int baseSeed = entryPlug->getSeed();

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[mpiPlug->numberProcessors];

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

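  // a is the decay constant of the acceptance probability used below:
  // the penalty factor (3.0) divided by the average number of atoms per
  // molecule (nAtomsGlobal / nMolGlobal).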
  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }
  for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }

  if (mpiPlug->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) mpiPlug->numberProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        }


        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

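        // For example, with the penalty of 3.0 used above, overshooting
        // nTarget by one average-sized molecule gives Pacc = exp(-3),
        // roughly 5%, so large overloads are almost always rejected and
        // another processor is drawn.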
        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }


  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < mpiPlug->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      natoms_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  mpiPlug->myNMol = nmol_local;
  mpiPlug->myNlocal = natoms_local;

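  // Build this node's local <-> global lookup tables: globalAtomIndex and
  // globalMolIndex map local indices back to global ones, while
  // globalToLocalAtom and globalToLocalMol hold the local index (or -1 for
  // atoms and molecules that live on other processors).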
  globalAtomIndex.resize(mpiPlug->myNlocal);
  local_index = 0;
  for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == mpiPlug->myNode) {
      globalAtomIndex[local_index] = i;

      globalToLocalAtom[i] = local_index;
      local_index++;

    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalMolIndex.resize(mpiPlug->myNMol);
  local_index = 0;
  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
    if (MolToProcMap[i] == mpiPlug->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}


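// Push the current atom decomposition down to the Fortran side: build the
// list of global indices owned by this node (shifted to 1-based numbering)
// and hand it to setFsimParallel.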
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  // Fortran uses 1-based indexing, so shift each global index by 1 before
  // handing it across the language boundary.

  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;


  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: Fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // is_mpi