root/group/branches/new_design/OOPSE-4/src/brains/mpiSimulation.cpp
Revision: 1722
Committed: Tue Nov 9 23:11:39 2004 UTC by tim
File size: 7759 byte(s)
Log Message:
adding ForceManager

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "brains/mpiSimulation.hpp"
#include "utils/simError.h"
#include "UseTheForce/DarkSide/simParallel_interface.h"
#include "math/randomSPRNG.hpp"

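// Global pointer to the mpiSimulation object for this run.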
mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

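// Assign whole molecules to processors at random, biasing the choices so that
// the number of atoms on each processor stays close to the global average.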
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  if (parallelData->nProcessors > parallelData->nMolGlobal) {
    sprintf( painCave.errMsg,
             "nProcessors (%d) > nMol (%d)\n"
             "\tThe number of processors is larger than\n"
             "\tthe number of molecules. This will not result in a \n"
             "\tusable division of atoms for force decomposition.\n"
             "\tEither try a smaller number of processors, or run the\n"
             "\tsingle-processor version of OOPSE.\n",
             parallelData->nProcessors, parallelData->nMolGlobal );
    painCave.isFatal = 1;
    simError();
  }

  myRandom = new randomSPRNG( baseSeed );

  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
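  // i.e. a = 3.0 / (average number of atoms per molecule); this sets how
  // quickly the acceptance probability below decays once a processor would
  // exceed its target atom count.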

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
  }

  if (parallelData->myNode == 0) {
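    // nTarget is the ideal number of atoms per processor, rounded to the
    // nearest integer.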
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        }

        // The only situation left is when new_atoms > nTarget. We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
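        // For instance, with an average of 3 atoms per molecule, a = 1.0, so
        // exceeding nTarget by 2 atoms is accepted with probability
        // exp(-2) ~= 0.14, and by 10 atoms with probability exp(-10) ~= 4.5e-5.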

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;

          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }

    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
                MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

  globalMolIndex.resize(parallelData->nMolLocal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      local_index++;
    }
  }

}

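// Hand the local-to-global atom and cutoff group index maps (shifted to
// 1-based indexing) over to the Fortran side of the code.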
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
  int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];

  // Fortran indexing needs to be increased by 1 in order to get the 2
  // languages to not barf
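  // (e.g. the atom with global C/C++ index 0 is passed as Fortran index 1)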

  for(i = 0; i < parallelData->nAtomsLocal; i++)
    localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;

  for(i = 0; i < parallelData->nGroupsLocal; i++)
    localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;

  isError = 0;

  setFsimParallel( parallelData,
                   &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
                   &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
                   &isError );

  if( isError ){

    sprintf( painCave.errMsg,
275 "mpiRefresh errror: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] localToGlobalGroupIndex;
  delete[] localToGlobalAtomIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}

#endif // is_mpi