  1     #ifdef IS_MPI
  2  <
  2  >  #include <iostream>
  3     #include <cstdlib>
  4     #include <cstring>
  5     #include <cmath>

 25
 26     MolToProcMap = new int[entryPlug->n_mol];
 27     MolComponentType = new int[entryPlug->n_mol];
 28  -
 28     AtomToProcMap = new int[entryPlug->n_atoms];
 29
 30     mpiSim = this;

 34
 35     mpiSimulation::~mpiSimulation(){
 36
 37  +  delete[] MolToProcMap;
 38  +  delete[] MolComponentType;
 39  +  delete[] AtomToProcMap;
 40  +
 41     delete mpiPlug;
 42     // perhaps we should let fortran know the party is over.
 43

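The three delete[] calls added to ~mpiSimulation() pair with the new int[] allocations shown in the constructor hunk above (lines 26-28). As a minimal sketch of that ownership pattern, with a hypothetical mapOwner class and sizes standing in for entryPlug->n_mol and entryPlug->n_atoms:

    // Sketch only: each new[] in the constructor gets a matching delete[]
    // in the destructor; a plain delete here would be undefined behavior.
    class mapOwner {
    public:
      mapOwner(int n_mol, int n_atoms)
        : MolToProcMap(new int[n_mol]),
          MolComponentType(new int[n_mol]),
          AtomToProcMap(new int[n_atoms]) {}
      ~mapOwner() {
        delete[] MolToProcMap;
        delete[] MolComponentType;
        delete[] AtomToProcMap;
      }
    private:
      int* MolToProcMap;
      int* MolComponentType;
      int* AtomToProcMap;
    };
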
133     // How many atoms does this processor have?
134
135     old_atoms = AtomsPerProc[which_proc];
136  +  add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
137  +  new_atoms = old_atoms + add_atoms;
138
139     // If the processor already had too many atoms, just skip this
140     // processor and try again.
141
137  -  if (old_atoms >= nTarget) continue;
138  -
139  -  add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
140  -  new_atoms = old_atoms + add_atoms;
141  -
142  -  // If we can add this molecule to this processor without sending
143  -  // it above nTarget, then go ahead and do it:
144  -
145  -  if (new_atoms <= nTarget) {
146  -  MolToProcMap[i] = which_proc;
147  -  AtomsPerProc[which_proc] += add_atoms;
148  -  for (j = 0 ; j < add_atoms; j++ ) {
149  -  atomIndex++;
150  -  AtomToProcMap[atomIndex] = which_proc;
151  -  }
152  -  done = 1;
153  -  continue;
154  -  }
155  -
142     // If we've been through this loop too many times, we need
143     // to just give up and assign the molecule to this processor
144     // and be done with it.

155     MolToProcMap[i] = which_proc;
156     AtomsPerProc[which_proc] += add_atoms;
157     for (j = 0 ; j < add_atoms; j++ ) {
158  <  atomIndex++;
159  <  AtomToProcMap[atomIndex] = which_proc;
158  >  AtomToProcMap[atomIndex] = which_proc;
159  >  atomIndex++;
160     }
161     done = 1;
162     continue;
163     }
164
165  +  if (old_atoms >= nTarget) continue;
166  +
167  +  // If we can add this molecule to this processor without sending
168  +  // it above nTarget, then go ahead and do it:
169  +
170  +  if (new_atoms <= nTarget) {
171  +  MolToProcMap[i] = which_proc;
172  +  AtomsPerProc[which_proc] += add_atoms;
173  +  for (j = 0 ; j < add_atoms; j++ ) {
174  +  AtomToProcMap[atomIndex] = which_proc;
175  +  atomIndex++;
176  +  }
177  +  done = 1;
178  +  continue;
179  +  }
180  +
181  +
182     // The only situation left is where old_atoms < nTarget, but
183     // new_atoms > nTarget. We want to accept this with some
184     // probability that dies off the farther we are from nTarget

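The acceptance test that this comment describes sits in the lines elided between this hunk and the next (185-193), so its exact form is not shown in the diff. Purely as an illustration of the idea, a probability that dies off the farther new_atoms lands past nTarget could look like the sketch below; the function name, the decay constant a, and the exponential form are assumptions, not the expression used in this file.

    #include <cmath>
    #include <cstdlib>

    // Illustrative sketch: accept an over-target assignment with a
    // probability that decays as new_atoms moves beyond nTarget.
    bool acceptOverTarget(int new_atoms, int nTarget, double a = 1.0) {
      double accept_prob = std::exp(-a * (new_atoms - nTarget));
      double roll = std::rand() / (double) RAND_MAX;  // uniform draw in [0,1]
      return roll < accept_prob;
    }
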
194     MolToProcMap[i] = which_proc;
195     AtomsPerProc[which_proc] += add_atoms;
196     for (j = 0 ; j < add_atoms; j++ ) {
197  <  atomIndex++;
198  <  AtomToProcMap[atomIndex] = which_proc;
199  <  }
197  >  AtomToProcMap[atomIndex] = which_proc;
198  >  atomIndex++;
199  >  }
200     done = 1;
201     continue;
202     } else {

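This hunk and the identical change at lines 158-159 above make the same fix: atomIndex was incremented before being used as a subscript, so if atomIndex starts at 0 (which the new ordering implies) the old code never wrote AtomToProcMap[0] and wrote one element past the last atom on the final pass. Assigning first and incrementing afterwards keeps every subscript in 0..n-1; the same reordering is applied to local_index and globalIndex[] in the last hunk of this diff. A minimal sketch with a stand-in vector:

    #include <vector>

    // Sketch of the indexing fix: fill slots 0..n-1 of a map.
    void fillMap(std::vector<int>& map, int which_proc) {
      std::size_t atomIndex = 0;
      for (std::size_t j = 0; j < map.size(); j++) {
        map[atomIndex] = which_proc;  // use the current slot...
        atomIndex++;                  // ...then advance to the next one
        // The old order (increment first) skips slot 0 and overruns the end.
      }
    }
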
208
209     // Spray out this nonsense to all other processors:
210
211  <  MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
211  >  MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
212                            MPI_INT, 0);
213
214  <  MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
214  >  MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
215                            MPI_INT, 0);
216
217  <  MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
217  >  MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
218                            MPI_INT, 0);
219
220  <  MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
220  >  MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
221                            MPI_INT, 0);
222     } else {
223
224     // Listen to your marching orders from processor 0:
225
226  <  MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
226  >  MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
227                            MPI_INT, 0);
228
229  <  MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
229  >  MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
230                            MPI_INT, 0);
231
232  <  MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
232  >  MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
233                            MPI_INT, 0);
234
235  <  MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
235  >  MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
236                            MPI_INT, 0);
237  +
238  +
239     }
240
241

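Every change in this hunk removes the & from the first Bcast argument. MolToProcMap, AtomToProcMap, and MolComponentType are int* pointers to heap arrays (see the constructor hunk above), so &MolToProcMap is the address of the pointer variable itself, and the broadcast would touch the pointer rather than the map contents; the same reasoning applies to AtomsPerProc. A minimal sketch of the corrected call, using the same MPI-2 C++ bindings as the surrounding code, with the array and count as stand-ins:

    #include <mpi.h>

    // Sketch: broadcast the contents of a heap-allocated int array from rank 0.
    void broadcastMap(int* MolToProcMap, int nMolGlobal) {
      // Correct: the array name already points at the first element.
      MPI::COMM_WORLD.Bcast(MolToProcMap, nMolGlobal, MPI::INT, 0);

      // Wrong (old code): &MolToProcMap is the address of the local pointer,
      // so the nMolGlobal ints behind it are never transmitted.
      // MPI::COMM_WORLD.Bcast(&MolToProcMap, nMolGlobal, MPI::INT, 0);
    }
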
255     }
256     }
257
258  +  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
259  +
260     MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
261     MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
262

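The added cerr line only reports each node's local atom count; the two Allreduce calls beneath it (unchanged here) are what sum nmol_local and natoms_local across nodes into the global totals. A small sketch of that kind of consistency check, with hypothetical names, in the same C++ bindings:

    #include <mpi.h>
    #include <iostream>

    // Sketch: sum per-node atom counts and let rank 0 compare them against
    // the expected global total (nExpected is a stand-in for a value the
    // simulation already knows).
    void checkAtomTotal(int natoms_local, int nExpected) {
      int natoms_global = 0;
      MPI::COMM_WORLD.Allreduce(&natoms_local, &natoms_global, 1,
                                MPI::INT, MPI::SUM);
      if (MPI::COMM_WORLD.Get_rank() == 0 && natoms_global != nExpected)
        std::cerr << "atom count mismatch: " << natoms_global
                  << " != " << nExpected << "\n";
    }
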
289     local_index = 0;
290     for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
291     if (AtomToProcMap[i] == mpiPlug->myNode) {
285  -  local_index++;
292     globalIndex[local_index] = i;
293  +  local_index++;
294     }
295     }
296  <
296  >
297     return globalIndex;
298     }
299