 #ifdef IS_MPI
-
+#include <iostream>
 #include <cstdlib>
 #include <cstring>
+#include <cmath>
 #include <mpi.h>
-#include <mpi++.h>

 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"

+#define BASE_SEED 123456789

 mpiSimulation* mpiSim;

...
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;

-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
   mpiPlug->myNode = worldRank;

   MolToProcMap = new int[entryPlug->n_mol];
   MolComponentType = new int[entryPlug->n_mol];
-
   AtomToProcMap = new int[entryPlug->n_atoms];

   mpiSim = this;
...
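The hunk above is the template for the whole patch: the deprecated MPI C++ bindings (MPI::COMM_WORLD.*) are replaced by the C API, which returns results through pointer arguments and takes the communicator as an explicit parameter. A minimal, self-contained sketch of the calling convention, not part of the patch itself:

    // Minimal sketch of the MPI C-binding convention this patch adopts.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int size, rank;
      MPI_Comm_size(MPI_COMM_WORLD, &size);  // was MPI::COMM_WORLD.Get_size()
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // was MPI::COMM_WORLD.Get_rank()

      printf("rank %d of %d\n", rank, size);
      MPI_Finalize();
      return 0;
    }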

 mpiSimulation::~mpiSimulation(){

+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
   // perhaps we should let fortran know the party is over.

...
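The three delete[] lines plug a leak: the constructor allocates these maps with new[], and arrays allocated with new[] must be released with delete[]. A toy illustration of the pairing (the array name and size are hypothetical):

    // Toy illustration of the new[]/delete[] pairing the destructor now
    // follows; plain delete on a new[]-allocated array is undefined behavior.
    int main() {
      int* map = new int[8];   // like MolToProcMap in the constructor

      for (int i = 0; i < 8; i++) map[i] = 0;

      delete[] map;            // matches new[]; "delete map;" would not
      return 0;
    }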

   int nComponents;
   MoleculeStamp** compStamps;
-  randomSPRNG myRandom;
+  randomSPRNG *myRandom;
   int* componentsNmol;
   int* AtomsPerProc;

...
   int molIndex, atomIndex, compIndex, compStart;
   int done;
   int nLocal, molLocal;
-  int i, index;
+  int i, j, loops, which_proc, nmol_local, natoms_local;
+  int nmol_global, natoms_global;
+  int local_index, index;
   int smallDiff, bigDiff;
+  int baseSeed = BASE_SEED;

   int testSum;

...
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;

-  myRandom = new randomSPRNG();
+  myRandom = new randomSPRNG( baseSeed );

-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

   // Initialize things that we'll send out later:
   for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
...

       // Pick a processor at random

-      which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);

       // How many atoms does this processor have?

       old_atoms = AtomsPerProc[which_proc];
-
-      // If the processor already had too many atoms, just skip this
-      // processor and try again.
-
-      if (old_atoms >= nTarget) continue;
-
-      add_atoms = compStamps[MolComponentType[i]]->getNatoms();
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
-
-      // If we can add this molecule to this processor without sending
-      // it above nTarget, then go ahead and do it:
-
-      if (new_atoms <= nTarget) {
-        MolToProcMap[i] = which_proc;
-        AtomsPerProc[which_proc] += add_atoms;
-        for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
-        done = 1;
-        continue;
-      }

       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
...
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
         }
         done = 1;
         continue;
       }
+
+      // If we can add this molecule to this processor without sending
+      // it above nTarget, then go ahead and do it:
+
+      if (new_atoms <= nTarget) {
+        MolToProcMap[i] = which_proc;
+        AtomsPerProc[which_proc] += add_atoms;
+        for (j = 0 ; j < add_atoms; j++ ) {
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
+        done = 1;
+        continue;
+      }
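Note that every relocated loop now writes AtomToProcMap[atomIndex] before incrementing atomIndex. Assuming the counter starts at 0 (as the analogous local_index loop later in the diff suggests), the old increment-first order skipped slot 0 and wrote one element past the end. A toy illustration of the corrected pattern (array size hypothetical):

    // Toy illustration of the use-then-increment ordering the patch adopts:
    // incrementing first would skip map[0] and overrun the final slot.
    #include <cstdio>

    int main() {
      int map[3];
      int atomIndex = 0;

      for (int j = 0; j < 3; j++) {
        map[atomIndex] = 7;   // use the current slot first...
        atomIndex++;          // ...then advance the counter
      }

      printf("filled %d slots; map[0] = %d\n", atomIndex, map[0]);
      return 0;
    }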

-      // The only situation left is where old_atoms < nTarget, but
-      // new_atoms > nTarget.  We want to accept this with some
-      // probability that dies off the farther we are from nTarget

+      // The only situation left is when new_atoms > nTarget.  We
+      // want to accept this with some probability that dies off the
+      // farther we are from nTarget
+
       // roughly:  x = new_atoms - nTarget
       //           Pacc(x) = exp(- a * x)
-      // where a = 1 / (average atoms per molecule)
+      // where a = penalty / (average atoms per molecule)

       x = (double) (new_atoms - nTarget);
-      y = myRandom.getRandom();
-
-      if (exp(- a * x) > y) {
+      y = myRandom->getRandom();
+
+      if (y < exp(- a * x)) {
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
-          atomIndex++;
-          AtomToProcMap[atomIndex] = which_proc;
-        }
+          AtomToProcMap[atomIndex] = which_proc;
+          atomIndex++;
+        }
         done = 1;
         continue;
       } else {
...
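The acceptance rule above is a Metropolis-style test: an assignment that overshoots nTarget by x atoms survives with probability exp(-a*x), where a is now the penalty 3.0 divided by the average atoms per molecule. A standalone sketch of just that test, with std::mt19937 standing in for randomSPRNG (both supply uniform [0,1) deviates) and all numbers hypothetical:

    // Standalone sketch of the exponential acceptance test used above.
    #include <cmath>
    #include <cstdio>
    #include <random>

    int main() {
      std::mt19937 gen(123456789);
      std::uniform_real_distribution<double> uniform(0.0, 1.0);

      double a = 3.0 / 10.0;   // penalty / (average atoms per molecule)
      int nTarget   = 100;     // ideal atom count for this processor
      int new_atoms = 110;     // hypothetical total if we accept the molecule

      double x = (double)(new_atoms - nTarget);
      double y = uniform(gen);

      // Small overshoots are often accepted; large ones almost never.
      if (y < std::exp(-a * x))
        printf("accept: y = %.3f < Pacc = %.3f\n", y, std::exp(-a * x));
      else
        printf("reject: try another processor\n");
      return 0;
    }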

         // Spray out this nonsense to all other processors:

-        MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
-                              MPI_INT, 0);
+        MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+                  MPI_INT, 0, MPI_COMM_WORLD);
       } else {

         // Listen to your marching orders from processor 0:

-        MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
-                              MPI_INT, 0);
+        MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+                  MPI_INT, 0, MPI_COMM_WORLD);

-        MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
-                              MPI_INT, 0);
+        MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+                  MPI_INT, 0, MPI_COMM_WORLD);
+
+
       }


...
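MPI_Bcast is a collective call, which is why the root branch and the "marching orders" branch contain the identical four lines: rank 0 supplies the data and every other rank receives into the same buffers. The patch also drops the & from each buffer argument; the maps are already int* pointers, so &MolToProcMap passed the address of the pointer rather than the data. A minimal sketch of the symmetry (buffer contents hypothetical):

    // Minimal sketch of MPI_Bcast symmetry: every rank issues the same call;
    // rank 0 sends and the rest receive into the buffer in place.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int rank;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);

      int map[4] = {0, 0, 0, 0};
      if (rank == 0) { map[0] = 1; map[1] = 2; map[2] = 3; map[3] = 4; }

      // The buffer itself is passed (map decays to int*), never &map.
      MPI_Bcast(map, 4, MPI_INT, 0, MPI_COMM_WORLD);

      printf("rank %d sees map[3] = %d\n", rank, map[3]);
      MPI_Finalize();
      return 0;
    }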
     }
   }

-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);

   if( nmol_global != entryPlug->n_mol ){
     sprintf( painCave.errMsg,
...
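The two MPI_Allreduce calls sum the per-node molecule and atom counts so that every rank can check the totals for consistency before continuing. A minimal sketch of the check (the per-node count is hypothetical):

    // Minimal sketch of the Allreduce sanity check: sum local counts over
    // all ranks, then compare the global total on every rank.
    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      int nprocs;
      MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

      int nmol_local  = 5;   // hypothetical count assigned to this rank
      int nmol_global = 0;

      MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);

      if (nmol_global != 5 * nprocs)
        printf("molecule count mismatch: %d\n", nmol_global);

      MPI_Finalize();
      return 0;
    }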
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      local_index++;
       globalIndex[local_index] = i;
+      local_index++;
     }
   }

-
-
-
-  index = mpiPlug->myAtomStart;
-//  for( i=0; i<mpiPlug->myNlocal; i++){
-//    globalIndex[i] = index;
-//    index++;
-//  }
-
-//  return globalIndex;
+  return globalIndex;
 }


...
   int isError, i;
   int *globalIndex = new int[mpiPlug->myNlocal];

-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
+  // not barf
+
+  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+

   isError = 0;
   setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
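The +1 exists because C++ indexes arrays from 0 while Fortran defaults to 1-based indexing, so the shift must be applied before the index list crosses the language boundary in setFsimParallel. A toy illustration (indices hypothetical):

    // Toy illustration of the 0-based -> 1-based shift applied before the
    // index list is handed to the Fortran side.
    #include <cstdio>

    int main() {
      int cIndex[3] = {0, 1, 2};   // C++ global indices, 0-based
      int fIndex[3];

      for (int i = 0; i < 3; i++)
        fIndex[i] = cIndex[i] + 1; // what Fortran expects, 1-based

      for (int i = 0; i < 3; i++)
        printf("C++: %d  ->  Fortran: %d\n", cIndex[i], fIndex[i]);
      return 0;
    }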