1        #ifdef IS_MPI
2    +
3        #include <iostream>
4        #include <stdlib.h>
5        #include <string.h>
11       #include "UseTheForce/DarkSide/simParallel_interface.h"
12       #include "math/randomSPRNG.hpp"
13
14   <   mpiSimulation* mpiSim;
14   >   mpiSimulation * mpiSim;
15
16   <   mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17   <   {
17   <     parallelData = new mpiSimData;
18   <
19   <     MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) );
20   <     parallelData->myNode = worldRank;
16   >   mpiSimulation::mpiSimulation( SimInfo *the_entryPlug ) {
17   >     parallelData = new mpiSimData;
18
19   <     MolToProcMap = new int[entryPlug->n_mol];
19   >     MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors));
20   >     parallelData->myNode = worldRank;
21
22   +     MolToProcMap = new int[entryPlug->n_mol];
23       }
24
25   +   mpiSimulation::~mpiSimulation() {
26   +     delete [] MolToProcMap;
27
28   <   mpiSimulation::~mpiSimulation(){
29   <
29   <     delete[] MolToProcMap;
28   >     delete parallelData;
29   >     // perhaps we should let fortran know the party is over.
30
31   -     delete parallelData;
32   -     // perhaps we should let fortran know the party is over.
33   -
31       }
32
33   <   void mpiSimulation::divideLabor( ){
33   >   void mpiSimulation::divideLabor() {
34   >     int nComponents;
35   >     MoleculeStamp ** compStamps;
36   >     randomSPRNG * myRandom;
37   >     int * componentsNmol;
38   >     int * AtomsPerProc;
39
40   <     int nComponents;
41   <     MoleculeStamp** compStamps;
42   <     randomSPRNG *myRandom;
43   <     int* componentsNmol;
44   <     int* AtomsPerProc;
45   <     int* GroupsPerProc;
40   >     double numerator;
41   >     double denominator;
42   >     double precast;
43   >     double x;
44   >     double y;
45   >     double a;
46   >     int old_atoms;
47   >     int add_atoms;
48   >     int new_atoms;
49
50   <     double numerator;
51   <     double denominator;
52   <     double precast;
53   <     double x, y, a;
54   <     int old_atoms, add_atoms, new_atoms;
55   <     int old_groups, add_groups, new_groups;
50   >     int nTarget;
51   >     int molIndex;
52   >     int atomIndex;
53   >     int done;
54   >     int i;
55   >     int j;
56   >     int loops;
57   >     int which_proc;
58   >     int nmol_global,
59   >         nmol_local;
60   >     int natoms_global,
61
62   <     int nTarget;
63   <     int molIndex, atomIndex, groupIndex;
54   <     int done;
55   <     int i, j, loops, which_proc;
56   <     int nmol_global, nmol_local;
57   <     int ngroups_global, ngroups_local;
58   <     int natoms_global, natoms_local;
59   <     int ncutoff_groups, nAtomsInGroups;
60   <     int local_index;
61   <     int baseSeed = entryPlug->getSeed();
62   <     CutoffGroupStamp* cg;
62   >     int baseSeed = entryPlug->getSeed();
63   >     CutoffGroupStamp * cg;
64
65   <     nComponents = entryPlug->nComponents;
66   <     compStamps = entryPlug->compStamps;
67   <     componentsNmol = entryPlug->componentsNmol;
68   <     AtomsPerProc = new int[parallelData->nProcessors];
68   <     GroupsPerProc = new int[parallelData->nProcessors];
69   <
70   <     parallelData->nAtomsGlobal = entryPlug->n_atoms;
71   <     parallelData->nGroupsGlobal = entryPlug->ngroup;
72   <     parallelData->nMolGlobal = entryPlug->n_mol;
65   >     nComponents = entryPlug->nComponents;
66   >     compStamps = entryPlug->compStamps;
67   >     componentsNmol = entryPlug->componentsNmol;
68   >     AtomsPerProc = new int[parallelData->nProcessors];
69
70   <     if (parallelData->nProcessors > parallelData->nMolGlobal) {
75   <       sprintf( painCave.errMsg,
76   <                "nProcessors (%d) > nMol (%d)\n"
77   <                "\tThe number of processors is larger than\n"
78   <                "\tthe number of molecules.  This will not result in a \n"
79   <                "\tusable division of atoms for force decomposition.\n"
80   <                "\tEither try a smaller number of processors, or run the\n"
81   <                "\tsingle-processor version of OOPSE.\n",
82   <                parallelData->nProcessors, parallelData->nMolGlobal );
83   <       painCave.isFatal = 1;
84   <       simError();
85   <     }
70   >     parallelData->nMolGlobal = entryPlug->n_mol;
71
72   <     myRandom = new randomSPRNG( baseSeed );
72   >     if (parallelData->nProcessors > parallelData->nMolGlobal) {
73   >       sprintf(painCave.errMsg,
74   >               "nProcessors (%d) > nMol (%d)\n"
75   >               "\tThe number of processors is larger than\n"
76   >               "\tthe number of molecules.  This will not result in a \n"
77   >               "\tusable division of atoms for force decomposition.\n"
78   >               "\tEither try a smaller number of processors, or run the\n"
79   >               "\tsingle-processor version of OOPSE.\n",
80   >               parallelData->nProcessors,
81   >               parallelData->nMolGlobal);
82
83   +       painCave.isFatal = 1;
84   +       simError();
85   +     }
86
87   <     a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal;
87   >     myRandom = new randomSPRNG(baseSeed);
88
89   <     // Initialize things that we'll send out later:
93   <     for (i = 0; i < parallelData->nProcessors; i++ ) {
94   <       AtomsPerProc[i] = 0;
95   <       GroupsPerProc[i] = 0;
96   <     }
97   <     for (i = 0; i < parallelData->nMolGlobal; i++ ) {
98   <       // default to an error condition:
99   <       MolToProcMap[i] = -1;
100  <     }
89   >     a = 3.0 * parallelData->nMolGlobal / parallelData->nAtomsGlobal;
90
91   <     if (parallelData->myNode == 0) {
92   <       numerator = (double) entryPlug->n_atoms;
93   <       denominator = (double) parallelData->nProcessors;
94   <       precast = numerator / denominator;
106  <       nTarget = (int)( precast + 0.5 );
91   >     // Initialize things that we'll send out later:
92   >     for( i = 0; i < parallelData->nProcessors; i++ ) {
93   >       AtomsPerProc[i] = 0;
94   >     }
95
96   <     // Build the array of molecule component types first
97   <     molIndex = 0;
98   <     for (i=0; i < nComponents; i++) {
111  <       for (j=0; j < componentsNmol[i]; j++) {
112  <         molIndex++;
113  <       }
96   >     for( i = 0; i < parallelData->nMolGlobal; i++ ) {
97   >       // default to an error condition:
98   >       MolToProcMap[i] = -1;
99       }
100
101  <     atomIndex = 0;
101  >     if (parallelData->myNode == 0) {
102  >       numerator = entryPlug->n_atoms;
103  >       denominator = parallelData->nProcessors;
104  >       precast = numerator / denominator;
105  >       nTarget = (int)(precast + 0.5);
106
107  <     for (i = 0; i < molIndex; i++ ) {
107  >       // Build the array of molecule component types first
108  >       molIndex = 0;
109
110  <       done = 0;
111  <       loops = 0;
110  >       for( i = 0; i < nComponents; i++ ) {
111  >         for( j = 0; j < componentsNmol[i]; j++ ) {
112  >           molIndex++;
113  >         }
114  >       }
115
116  <       while( !done ){
117  <         loops++;
118  <
126  <         // Pick a processor at random
116  >       for( i = 0; i < molIndex; i++ ) {
117  >         done = 0;
118  >         loops = 0;
119
120  <         which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors);
120  >         while (!done) {
121  >           loops++;
122
123  <         // How many atoms does this processor have?
131  <
132  <         old_atoms = AtomsPerProc[which_proc];
133  <         add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
134  <         new_atoms = old_atoms + add_atoms;
135  <
136  <         // If we've been through this loop too many times, we need
137  <         // to just give up and assign the molecule to this processor
138  <         // and be done with it.
139  <
140  <         if (loops > 100) {
141  <           sprintf( painCave.errMsg,
142  <                    "I've tried 100 times to assign molecule %d to a "
143  <                    " processor, but can't find a good spot.\n"
144  <                    "I'm assigning it at random to processor %d.\n",
145  <                    i, which_proc);
146  <           painCave.isFatal = 0;
147  <           simError();
148  <
149  <           MolToProcMap[i] = which_proc;
150  <           AtomsPerProc[which_proc] += add_atoms;
123  >           // Pick a processor at random
124
125  <           done = 1;
153  <           continue;
154  <         }
155  <
156  <         // If we can add this molecule to this processor without sending
157  <         // it above nTarget, then go ahead and do it:
158  <
159  <         if (new_atoms <= nTarget) {
160  <           MolToProcMap[i] = which_proc;
161  <           AtomsPerProc[which_proc] += add_atoms;
125  >           which_proc
126
127  <           done = 1;
164  <           continue;
165  <         }
127  >               = (int) (myRandom->getRandom() * parallelData->nProcessors);
128
129  +           // How many atoms does this processor have?
130
131  <         // The only situation left is when new_atoms > nTarget.  We
132  <         // want to accept this with some probability that dies off the
133  <         // farther we are from nTarget
131  >           old_atoms = AtomsPerProc[which_proc];
132  >           add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
133  >           new_atoms = old_atoms + add_atoms;
134
135  <         // roughly:  x = new_atoms - nTarget
136  <         //           Pacc(x) = exp(- a * x)
137  <         // where a = penalty / (average atoms per molecule)
135  >           // If we've been through this loop too many times, we need
136  >           // to just give up and assign the molecule to this processor
137  >           // and be done with it.
138
139  <         x = (double) (new_atoms - nTarget);
140  <         y = myRandom->getRandom();
141  <
142  <         if (y < exp(- a * x)) {
143  <           MolToProcMap[i] = which_proc;
144  <           AtomsPerProc[which_proc] += add_atoms;
139  >           if (loops > 100) {
140  >             sprintf(
141  >                 painCave.errMsg,
142  >                 "I've tried 100 times to assign molecule %d to a "
143  >                 " processor, but can't find a good spot.\n"
144  >                 "I'm assigning it at random to processor %d.\n",
145  >                 i,
146  >                 which_proc);
147
148  <           done = 1;
149  <           continue;
185  <         } else {
186  <           continue;
187  <         }
188  <
189  <       }
190  <     }
148  >             painCave.isFatal = 0;
149  >             simError();
150
151  +             MolToProcMap[i] = which_proc;
152  +             AtomsPerProc[which_proc] += add_atoms;
153
154  <     // Spray out this nonsense to all other processors:
154  >             done = 1;
155  >             continue;
156  >           }
157
158  <     MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
159  <               MPI_INT, 0, MPI_COMM_WORLD);
197  <
198  <   } else {
158  >           // If we can add this molecule to this processor without sending
159  >           // it above nTarget, then go ahead and do it:
160
161  +           if (new_atoms <= nTarget) {
162  +             MolToProcMap[i] = which_proc;
163  +             AtomsPerProc[which_proc] += add_atoms;
164  +
165  +             done = 1;
166  +             continue;
167  +           }
168  +
169  +           // The only situation left is when new_atoms > nTarget.  We
170  +           // want to accept this with some probability that dies off the
171  +           // farther we are from nTarget
172  +
173  +           // roughly:  x = new_atoms - nTarget
174  +           //           Pacc(x) = exp(- a * x)
175  +           // where a = penalty / (average atoms per molecule)
176  +
177  +           x = (double)(new_atoms - nTarget);
178  +           y = myRandom->getRandom();
179  +
180  +           if (y < exp(- a * x)) {
181  +             MolToProcMap[i] = which_proc;
182  +             AtomsPerProc[which_proc] += add_atoms;
183  +
184  +             done = 1;
185  +             continue;
186  +           } else { continue; }
187  +         }
188  +       }
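The comment block in the hunk above is the heart of divideLabor(): node 0 assigns each molecule to a randomly chosen processor, accepts the pick outright if it keeps that processor at or below nTarget = round(n_atoms / nProcessors) atoms, and otherwise accepts it with probability Pacc(x) = exp(-a * x), where x = new_atoms - nTarget and a = 3.0 * nMolGlobal / nAtomsGlobal. A minimal standalone sketch of that rule follows; std::mt19937 stands in for randomSPRNG, and assignMolecules and its parameters are illustrative names, not OOPSE API:

    #include <cmath>
    #include <cstddef>
    #include <random>
    #include <vector>

    // Sketch: assign molecules (atoms[m] = atom count of molecule m) to
    // nProcs processors using the acceptance rule described above.
    std::vector<int> assignMolecules(const std::vector<int>& atoms,
                                     int nProcs, double a, unsigned seed) {
      std::mt19937 gen(seed);
      std::uniform_real_distribution<double> uniform(0.0, 1.0);

      int totalAtoms = 0;
      for (int n : atoms) totalAtoms += n;
      int nTarget = (int)((double)totalAtoms / nProcs + 0.5);

      std::vector<int> molToProc(atoms.size(), -1);  // -1 = unassigned
      std::vector<int> atomsPerProc(nProcs, 0);

      for (std::size_t i = 0; i < atoms.size(); i++) {
        for (int loops = 1;; loops++) {
          // Pick a processor at random.
          int which = (int)(uniform(gen) * nProcs);
          int newAtoms = atomsPerProc[which] + atoms[i];
          double x = (double)(newAtoms - nTarget);

          // Accept if under target, if we have looped too long, or with
          // probability exp(-a * x) for an overshoot of x atoms.
          if (newAtoms <= nTarget || loops > 100 ||
              uniform(gen) < std::exp(-a * x)) {
            molToProc[i] = which;
            atomsPerProc[which] += atoms[i];
            break;
          }
        }
      }
      return molToProc;
    }

The exponential penalty keeps the placement random while holding the expected load per processor near nTarget, and the loops > 100 escape hatch guarantees that the assignment loop terminates.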
189  +
190  +       // Spray out this nonsense to all other processors:
191  +
192  +       MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
193  +                 MPI_COMM_WORLD);
194  +     } else {
195  +
196         // Listen to your marching orders from processor 0:
201  -
202  -     MPI_Bcast(MolToProcMap, parallelData->nMolGlobal,
203  -               MPI_INT, 0, MPI_COMM_WORLD);
204  -
205  -   }
197
198  <     // Let's all check for sanity:
198  >       MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, MPI_INT, 0,
199  >                 MPI_COMM_WORLD); }
200
201  <     nmol_local = 0;
210  <     for (i = 0 ; i < parallelData->nMolGlobal; i++ ) {
211  <       if (MolToProcMap[i] == parallelData->myNode) {
212  <         nmol_local++;
213  <       }
214  <     }
201  >     // Let's all check for sanity:
202
203  +     nmol_local = 0;
204
205  <     MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
206  <                   MPI_COMM_WORLD);
207  <
208  <     if( nmol_global != entryPlug->n_mol ){
209  <       sprintf( painCave.errMsg,
222  <                "The sum of all nmol_local, %d, did not equal the "
223  <                "total number of molecules, %d.\n",
224  <                nmol_global, entryPlug->n_mol );
225  <       painCave.isFatal = 1;
226  <       simError();
227  <     }
228  <
229  <     sprintf( checkPointMsg,
230  <              "Successfully divided the molecules among the processors.\n" );
231  <     MPIcheckPoint();
205  >     for( i = 0; i < parallelData->nMolGlobal; i++ ) {
206  >       if (MolToProcMap[i] == parallelData->myNode) {
207  >         nmol_local++;
208  >       }
209  >     }
210
211  <     parallelData->nMolLocal = nmol_local;
212  <     parallelData->nAtomsLocal = natoms_local;
235  <     parallelData->nGroupsLocal = ngroups_local;
211  >     MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
212  >                   MPI_COMM_WORLD);
213
214  <     globalMolIndex.resize(parallelData->nMolLocal);
215  <     local_index = 0;
216  <     for (i = 0; i < parallelData->nMolGlobal; i++) {
217  <       if (MolToProcMap[i] == parallelData->myNode) {
218  <         globalMolIndex[local_index] = i;
219  <         local_index++;
214  >     if (nmol_global != entryPlug->n_mol) {
215  >       sprintf(painCave.errMsg,
216  >               "The sum of all nmol_local, %d, did not equal the "
217  >               "total number of molecules, %d.\n",
218  >               nmol_global,
219  >               entryPlug->n_mol);
220  >
221  >       painCave.isFatal = 1;
222  >       simError();
223        }
224
225  <     }
226  <
227  <   }
225  >     sprintf(checkPointMsg,
226  >             "Successfully divided the molecules among the processors.\n");
227  >     MPIcheckPoint();
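The same hunk also shows the broadcast-and-verify pattern used for the decomposition: processor 0 sprays MolToProcMap out with MPI_Bcast, every rank counts the molecules mapped to it, and the MPI_Allreduce sum of those counts is checked against entryPlug->n_mol. A compact sketch of that pattern, assuming MPI_Init has already been called (checkDecomposition is an illustrative name):

    #include <mpi.h>
    #include <vector>

    // Sketch: broadcast a root-built map, then verify it by reduction.
    bool checkDecomposition(std::vector<int>& molToProc, int nMolGlobal) {
      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      // Rank 0's copy of the map overwrites everyone else's.
      MPI_Bcast(molToProc.data(), nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);

      // Each rank counts the molecules it owns...
      int nmolLocal = 0;
      for (int i = 0; i < nMolGlobal; i++)
        if (molToProc[i] == rank) nmolLocal++;

      // ...and the sum over all ranks must recover the global count.
      int nmolGlobal = 0;
      MPI_Allreduce(&nmolLocal, &nmolGlobal, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      return nmolGlobal == nMolGlobal;
    }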
228
229  +     parallelData->nMolLocal = nmol_local;
230
231  <   void mpiSimulation::mpiRefresh( void ){
231  >     globalMolIndex.resize(parallelData->nMolLocal);
232  >     local_index = 0;
233
234  <     int isError, i;
235  <     int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
236  <     int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];
234  >     for( i = 0; i < parallelData->nMolGlobal; i++ ) {
235  >       if (MolToProcMap[i] == parallelData->myNode) {
236  >         globalMolIndex[local_index] = i;
237  >         local_index++;
238  >       }
239  >     }
240  >   }
241
242  <     // Fortran indexing needs to be increased by 1 in order to get the 2
243  <     // languages to not barf
242  >   void mpiSimulation::mpiRefresh( void ) {
243  >     int isError, i;
244  >     int * localToGlobalAtomIndex = new int[parallelData->nAtomsLocal];
245  >     int * localToGlobalGroupIndex = new int[parallelData->nGroupsLocal];
246
247  <     for(i = 0; i < parallelData->nAtomsLocal; i++)
247  >     // Fortran indexing needs to be increased by 1 in order to get the 2
248  >     // languages to not barf
249  >
250  >     for( i = 0; i < parallelData->nAtomsLocal; i++ )
251          localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1;
252
253  <     for(i = 0; i < parallelData->nGroupsLocal; i++)
253  >     for( i = 0; i < parallelData->nGroupsLocal; i++ )
254          localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1;
264  -
265  -     isError = 0;
255
256  <     setFsimParallel( parallelData,
268  <                      &(parallelData->nAtomsLocal), localToGlobalAtomIndex,
269  <                      &(parallelData->nGroupsLocal), localToGlobalGroupIndex,
270  <                      &isError );
256  >     isError = 0;
257
258  <     if( isError ){
258  >     setFsimParallel(parallelData, &(parallelData->nAtomsLocal),
259  >                     localToGlobalAtomIndex, &(parallelData->nGroupsLocal),
260  >                     localToGlobalGroupIndex, &isError);
261
262  <       sprintf( painCave.errMsg,
263  <                "mpiRefresh errror: fortran didn't like something we gave it.\n" );
264  <       painCave.isFatal = 1;
265  <       simError();
266  <     }
262  >     if (isError) {
263  >       sprintf(painCave.errMsg,
264  >               "mpiRefresh errror: fortran didn't like something we gave it.\n");
265  >       painCave.isFatal = 1;
266  >       simError();
267  >     }
268
269  <     delete[] localToGlobalGroupIndex;
270  <     delete[] localToGlobalAtomIndex;
269  >     delete [] localToGlobalGroupIndex;
270  >     delete [] localToGlobalAtomIndex;
271
272  <
273  <     sprintf( checkPointMsg,
285  <              " mpiRefresh successful.\n" );
286  <     MPIcheckPoint();
272  >     sprintf(checkPointMsg, " mpiRefresh successful.\n");
273  >     MPIcheckPoint();
274      }
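mpiRefresh() adds 1 to every atom and group index before handing the arrays to setFsimParallel because the Fortran side of OOPSE indexes arrays from 1 while the C++ side indexes from 0. A trivial sketch of that convention (toFortranIndexing is an illustrative name):

    #include <cstddef>
    #include <vector>

    // Sketch: shift 0-based C++ indices to the 1-based indexing that the
    // Fortran side expects before crossing the language boundary.
    std::vector<int> toFortranIndexing(const std::vector<int>& cIndex) {
      std::vector<int> fIndex(cIndex.size());
      for (std::size_t i = 0; i < cIndex.size(); i++)
        fIndex[i] = cIndex[i] + 1;
      return fIndex;
    }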
288  -
275
276      #endif // is_mpi