root/group/branches/new_design/OOPSE-2.0/src/brains/mpiSimulation.cpp
Revision: 1723
Committed: Wed Nov 10 02:58:55 2004 UTC by tim
File size: 8053 byte(s)
Log Message:
mpiSimulation in progress

File Contents

#ifdef IS_MPI

#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "brains/mpiSimulation.hpp"
#include "utils/simError.h"
#include "UseTheForce/DarkSide/simParallel_interface.h"
#include "math/randomSPRNG.hpp"

mpiSimulation * mpiSim;

mpiSimulation::mpiSimulation( SimInfo *the_entryPlug ) {
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors));
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol]; // filled and read in divideLabor()
}

mpiSimulation::~mpiSimulation() {
  delete [] MolToProcMap;
  delete [] MolComponentType;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

void mpiSimulation::divideLabor() {
  int nComponents;
  MoleculeStamp ** compStamps;
  randomSPRNG * myRandom;
  int * componentsNmol;
  int * AtomsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x;
  double y;
  double a;
  int old_atoms;
  int add_atoms;
  int new_atoms;

  int nTarget;
  int molIndex;
  int atomIndex;
  int done;
  int i;
  int j;
  int loops;
  int which_proc;
  int nmol_global, nmol_local;
  int natoms_global, natoms_local;
  int local_index;

  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp * cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];

  parallelData->nMolGlobal = entryPlug->n_mol;
  parallelData->nAtomsGlobal = entryPlug->n_atoms; // used by the penalty factor below

  if (parallelData->nProcessors > parallelData->nMolGlobal) {
    sprintf(painCave.errMsg,
            "nProcessors (%d) > nMol (%d)\n"
            "\tThe number of processors is larger than\n"
            "\tthe number of molecules. This will not result in a \n"
            "\tusable division of atoms for force decomposition.\n"
            "\tEither try a smaller number of processors, or run the\n"
            "\tsingle-processor version of OOPSE.\n",
            parallelData->nProcessors,
            parallelData->nMolGlobal);

    painCave.isFatal = 1;
    simError();
  }

  myRandom = new randomSPRNG(baseSeed);

  a = 3.0 * parallelData->nMolGlobal / parallelData->nAtomsGlobal;
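  // Explanatory note: a is effectively penalty / (average atoms per molecule);
  // the penalty is the 3.0 above and nAtomsGlobal / nMolGlobal is the average
  // molecule size, so systems built from larger molecules tolerate bigger
  // overshoots in the acceptance test further down.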

  // Initialize things that we'll send out later:
  for( i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
  }

  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
    numerator = entryPlug->n_atoms;
    denominator = parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)(precast + 0.5);
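    // For example (illustrative numbers only): 1002 atoms on 8 processors
    // gives precast = 125.25 and nTarget = 125 atoms per processor.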

    // Build the array of molecule component types first
    molIndex = 0;

    for( i = 0; i < nComponents; i++ ) {
      for( j = 0; j < componentsNmol[i]; j++ ) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    for( i = 0; i < molIndex; i++ ) {
      done = 0;
      loops = 0;

      while (!done) {
        loops++;

        // Pick a processor at random
        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?
        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf(painCave.errMsg,
                  "I've tried 100 times to assign molecule %d to a "
                  "processor, but can't find a good spot.\n"
                  "I'm assigning it at random to processor %d.\n",
                  i,
                  which_proc);

          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly:  x = new_atoms - nTarget
        //           Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)

        x = (double)(new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        } else { continue; }
      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
              MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
              MPI_COMM_WORLD);
  }

  // Let's all check for sanity:

  nmol_local = 0;

  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  if (nmol_global != entryPlug->n_mol) {
    sprintf(painCave.errMsg,
            "The sum of all nmol_local, %d, did not equal the "
            "total number of molecules, %d.\n",
            nmol_global,
            entryPlug->n_mol);

    painCave.isFatal = 1;
    simError();
  }

  sprintf(checkPointMsg,
          "Successfully divided the molecules among the processors.\n");
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;

  globalMolIndex.resize(parallelData->nMolLocal);
  local_index = 0;

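  // For example (illustrative values only): with 4 molecules mapped to
  // processors {0, 1, 0, 1}, processor 0 ends up with nMolLocal = 2 and
  // globalMolIndex = {0, 2}, while processor 1 gets globalMolIndex = {1, 3}.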
  for( i = 0; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      local_index++;
    }
  }
}

void mpiSimulation::mpiRefresh( void ) {
  int isError, i;
  int * localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int * localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the 2
  // languages to not barf
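  // (for example, a globalAtomIndex entry of 0 on the C++ side is passed
  //  to the Fortran side as 1)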

  for( i = 0; i < parallelData->nAtomsLocal; i++ )
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for( i = 0; i < parallelData->nGroupsLocal; i++ )
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;

  isError = 0;

  setFsimParallel(parallelData, &(parallelData->nAtomsLocal),
                  localToGlobalAtomIndex, &(parallelData->nGroupsLocal),
                  localToGlobalGroupIndex, &isError);

  if (isError) {
    sprintf(painCave.errMsg,
            "mpiRefresh error: fortran didn't like something we gave it.\n");
    painCave.isFatal = 1;
    simError();
  }

  delete [] localToGlobalGroupIndex;
  delete [] localToGlobalAtomIndex;

  sprintf(checkPointMsg, " mpiRefresh successful.\n");
  MPIcheckPoint();
}

#endif // IS_MPI