root/group/trunk/OOPSE/libmdtools/mpiSimulation.cpp
Revision: 1203
Committed: Thu May 27 18:59:17 2004 UTC by gezelter
File size: 11925 byte(s)
Log Message:
Cutoff group changes under MPI

File Contents

#ifdef IS_MPI
#include <iostream>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <mpi.h>

#include "mpiSimulation.hpp"
#include "simError.h"
#include "fortranWrappers.hpp"
#include "randomSPRNG.hpp"

mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  parallelData = new mpiSimData;

  MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
  parallelData->myNode = worldRank;

  MolToProcMap = new int[entryPlug->n_mol];
  MolComponentType = new int[entryPlug->n_mol];
  AtomToProcMap = new int[entryPlug->n_atoms];
  GroupToProcMap = new int[entryPlug->ngroup];

  mpiSim = this;
  wrapMeSimParallel( this );
}


mpiSimulation::~mpiSimulation(){

  delete[] MolToProcMap;
  delete[] MolComponentType;
  delete[] AtomToProcMap;
  delete[] GroupToProcMap;

  delete parallelData;
  // perhaps we should let fortran know the party is over.

}

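// divideLabor() assigns every molecule (and with it, every atom and cutoff
// group) to a processor.  Node 0 picks a processor at random for each
// molecule and accepts or rejects the pick so that per-processor atom
// counts stay near nAtomsGlobal / nProcessors, then broadcasts the
// resulting maps to all other nodes.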
void mpiSimulation::divideLabor( ){

  int nComponents;
  MoleculeStamp** compStamps;
  randomSPRNG *myRandom;
  int* componentsNmol;
  int* AtomsPerProc;
  int* GroupsPerProc;

  double numerator;
  double denominator;
  double precast;
  double x, y, a;
  int old_atoms, add_atoms, new_atoms;
  int old_groups, add_groups, new_groups;

  int nTarget;
  int molIndex, atomIndex, groupIndex;
  int done;
  int i, j, loops, which_proc;
  int nmol_global, nmol_local;
  int ngroups_global, ngroups_local;
  int natoms_global, natoms_local;
  int ncutoff_groups, nAtomsInGroups;
  int local_index;
  int baseSeed = entryPlug->getSeed();
  CutoffGroupStamp* cg;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;
  AtomsPerProc = new int[parallelData->nProcessors];
  GroupsPerProc = new int[parallelData->nProcessors];

  parallelData->nAtomsGlobal = entryPlug->n_atoms;
  parallelData->nBondsGlobal = entryPlug->n_bonds;
  parallelData->nBendsGlobal = entryPlug->n_bends;
  parallelData->nTorsionsGlobal = entryPlug->n_torsions;
  parallelData->nSRIGlobal = entryPlug->n_SRI;
  parallelData->nGroupsGlobal = entryPlug->ngroup;
  parallelData->nMolGlobal = entryPlug->n_mol;

  myRandom = new randomSPRNG( baseSeed );

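  // a is the penalty (3.0) divided by the average number of atoms per
  // molecule; it controls how fast the acceptance probability used below,
  // exp(-a * x), falls off once a processor overshoots its atom target.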
  a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;

  // Initialize things that we'll send out later:
  for (i = 0; i < parallelData->nProcessors; i++ ) {
    AtomsPerProc[i] = 0;
    GroupsPerProc[i] = 0;
  }
  for (i = 0; i < parallelData->nMolGlobal; i++ ) {
    // default to an error condition:
    MolToProcMap[i] = -1;
    MolComponentType[i] = -1;
  }
  for (i = 0; i < parallelData->nAtomsGlobal; i++ ) {
    // default to an error condition:
    AtomToProcMap[i] = -1;
  }
  for (i = 0; i < parallelData->nGroupsGlobal; i++ ) {
    // default to an error condition:
    GroupToProcMap[i] = -1;
  }

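  // Only the root node works out the assignments; every other node picks
  // them up from the broadcasts further down.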
  if (parallelData->myNode == 0) {
    numerator = (double) entryPlug->n_atoms;
    denominator = (double) parallelData->nProcessors;
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );

    // Build the array of molecule component types first
    molIndex = 0;
    for (i=0; i < nComponents; i++) {
      for (j=0; j < componentsNmol[i]; j++) {
        MolComponentType[molIndex] = i;
        molIndex++;
      }
    }

    atomIndex = 0;
    groupIndex = 0;

    for (i = 0; i < molIndex; i++ ) {

      done = 0;
      loops = 0;

      while( !done ){
        loops++;

        // Pick a processor at random

        which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);

        // How many atoms does this processor have?

        old_atoms = AtomsPerProc[which_proc];
        add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
        new_atoms = old_atoms + add_atoms;

        old_groups = GroupsPerProc[which_proc];
        ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups();
        nAtomsInGroups = 0;
        for (j = 0; j < ncutoff_groups; j++) {
          cg = compStamps[MolComponentType[i]]->getCutoffGroup(j);
          nAtomsInGroups += cg->getNMembers();
        }
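        // Each atom that is not a member of a declared cutoff group counts
        // as a group of one, so this molecule contributes
        // (atoms outside groups) + (declared groups) groups.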
        add_groups = add_atoms - nAtomsInGroups + ncutoff_groups;
        new_groups = old_groups + add_groups;

        // If we've been through this loop too many times, we need
        // to just give up and assign the molecule to this processor
        // and be done with it.

        if (loops > 100) {
          sprintf( painCave.errMsg,
                   "I've tried 100 times to assign molecule %d to a "
                   "processor, but can't find a good spot.\n"
                   "I'm assigning it at random to processor %d.\n",
                   i, which_proc);
          painCave.isFatal = 0;
          simError();

          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }

        // If we can add this molecule to this processor without sending
        // it above nTarget, then go ahead and do it:

        if (new_atoms <= nTarget) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        }


        // The only situation left is when new_atoms > nTarget.  We
        // want to accept this with some probability that dies off the
        // farther we are from nTarget

        // roughly: x = new_atoms - nTarget
        //          Pacc(x) = exp(- a * x)
        // where a = penalty / (average atoms per molecule)
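        // For example, with roughly three atoms per molecule a is about 1,
        // so overshooting nTarget by one atom is accepted about 37% of the
        // time, and overshooting by three atoms only about 5% of the time.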

        x = (double) (new_atoms - nTarget);
        y = myRandom->getRandom();

        if (y < exp(- a * x)) {
          MolToProcMap[i] = which_proc;
          AtomsPerProc[which_proc] += add_atoms;
          for (j = 0 ; j < add_atoms; j++ ) {
            AtomToProcMap[atomIndex] = which_proc;
            atomIndex++;
          }
          GroupsPerProc[which_proc] += add_groups;
          for (j=0; j < add_groups; j++) {
            GroupToProcMap[groupIndex] = which_proc;
            groupIndex++;
          }
          done = 1;
          continue;
        } else {
          continue;
        }

      }
    }


    // Spray out this nonsense to all other processors:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);
  } else {

    // Listen to your marching orders from processor 0:

    MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(MolComponentType, parallelData->nMolGlobal,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(AtomsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

    MPI_Bcast(GroupsPerProc, parallelData->nProcessors,
              MPI_INT, 0, MPI_COMM_WORLD);

  }

  // Let's all check for sanity:

  nmol_local = 0;
  for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
    if (MolToProcMap[i] == parallelData->myNode) {
      nmol_local++;
    }
  }

  natoms_local = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      natoms_local++;
    }
  }

  ngroups_local = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      ngroups_local++;
    }
  }

  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);

  MPI_Allreduce(&natoms_local, &natoms_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  MPI_Allreduce(&ngroups_local, &ngroups_global, 1, MPI_INT,
                MPI_SUM, MPI_COMM_WORLD);

  if( nmol_global != entryPlug->n_mol ){
    sprintf( painCave.errMsg,
             "The sum of all nmol_local, %d, did not equal the "
             "total number of molecules, %d.\n",
             nmol_global, entryPlug->n_mol );
    painCave.isFatal = 1;
    simError();
  }

  if( natoms_global != entryPlug->n_atoms ){
    sprintf( painCave.errMsg,
             "The sum of all natoms_local, %d, did not equal the "
             "total number of atoms, %d.\n",
             natoms_global, entryPlug->n_atoms );
    painCave.isFatal = 1;
    simError();
  }

  if( ngroups_global != entryPlug->ngroup ){
    sprintf( painCave.errMsg,
             "The sum of all ngroups_local, %d, did not equal the "
             "total number of cutoffGroups, %d.\n",
             ngroups_global, entryPlug->ngroup );
    painCave.isFatal = 1;
    simError();
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  parallelData->nMolLocal = nmol_local;
  parallelData->nAtomsLocal = natoms_local;
  parallelData->nGroupsLocal = ngroups_local;

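  // Build the lookup tables between local and global indices; entries of
  // -1 in the globalToLocal arrays mark atoms, groups, and molecules that
  // live on other processors.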
  globalAtomIndex.resize(parallelData->nAtomsLocal);
  globalToLocalAtom.resize(parallelData->nAtomsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == parallelData->myNode) {
      globalAtomIndex[local_index] = i;
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    else
      globalToLocalAtom[i] = -1;
  }

  globalGroupIndex.resize(parallelData->nGroupsLocal);
  globalToLocalGroup.resize(parallelData->nGroupsGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nGroupsGlobal; i++) {
    if (GroupToProcMap[i] == parallelData->myNode) {
      globalGroupIndex[local_index] = i;
      globalToLocalGroup[i] = local_index;
      local_index++;
    }
    else
      globalToLocalGroup[i] = -1;
  }

  globalMolIndex.resize(parallelData->nMolLocal);
  globalToLocalMol.resize(parallelData->nMolGlobal);
  local_index = 0;
  for (i = 0; i < parallelData->nMolGlobal; i++) {
    if (MolToProcMap[i] == parallelData->myNode) {
      globalMolIndex[local_index] = i;
      globalToLocalMol[i] = local_index;
      local_index++;
    }
    else
      globalToLocalMol[i] = -1;
  }

}


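// mpiRefresh() hands the current parallel decomposition and the (1-based)
// global atom indices down to the fortran side via setFsimParallel.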
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalAtomIndex = new int[parallelData->nAtomsLocal];

  // Fortran indexing is 1-based, so the global indices are shifted by 1
  // to keep the two languages in agreement.

  for(i=0; i<parallelData->nAtomsLocal; i++) globalAtomIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;

  isError = 0;
  setFsimParallel( parallelData, &(entryPlug->n_atoms), globalAtomIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalAtomIndex;


  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}


#endif // IS_MPI