 25
 26      MolToProcMap = new int[entryPlug->n_mol];
 27      MolComponentType = new int[entryPlug->n_mol];
 28 -
 28      AtomToProcMap = new int[entryPlug->n_atoms];
 29
 30      mpiSim = this;

 34
 35   mpiSimulation::~mpiSimulation(){
 36
 37 +    delete[] MolToProcMap;
 38 +    delete[] MolComponentType;
 39 +    delete[] AtomToProcMap;
 40 +
 41      delete mpiPlug;
 42      // perhaps we should let fortran know the party is over.
 43
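These delete[] lines pair with the new int[...] allocations in the constructor hunk above; before this change every mpiSimulation instance leaked the three maps. Array new must be released with delete[], not plain delete. A minimal illustration of the pairing rule (the function is hypothetical, not part of the source):

    void lifetime_sketch(int n) {
      int* map = new int[n];   // array new, as in the constructor
      // ... use map ...
      delete[] map;            // must be delete[]; plain delete on an
                               // array allocation is undefined behavior
    }
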
149      MolToProcMap[i] = which_proc;
150      AtomsPerProc[which_proc] += add_atoms;
151      for (j = 0 ; j < add_atoms; j++ ) {
152 <      atomIndex++;
153 <      AtomToProcMap[atomIndex] = which_proc;
152 >      AtomToProcMap[atomIndex] = which_proc;
153 >      atomIndex++;
154      }
155      done = 1;
156      continue;
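This reordering fixes an off-by-one: with the increment first, the loop never wrote AtomToProcMap[0] and its last store landed one slot past the atoms assigned so far (assuming atomIndex starts at 0). Writing first and incrementing after fills exactly the slots atomIndex .. atomIndex + add_atoms - 1. The same swap is applied in the two hunks that follow (lines 175-176 and 197-198). A standalone sketch of the corrected inner loop; the helper name and signature are illustrative, not part of the source:

    // Assign the next add_atoms slots of AtomToProcMap to which_proc.
    void assignAtoms(int* AtomToProcMap, int& atomIndex,
                     int add_atoms, int which_proc) {
      for (int j = 0; j < add_atoms; j++) {
        AtomToProcMap[atomIndex] = which_proc;  // write the slot first...
        atomIndex++;                            // ...then advance
      }
      // Old order (increment before the store) skipped slot 0 and wrote
      // one element past the last valid slot on every call.
    }
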
172      MolToProcMap[i] = which_proc;
173      AtomsPerProc[which_proc] += add_atoms;
174      for (j = 0 ; j < add_atoms; j++ ) {
175 <      atomIndex++;
176 <      AtomToProcMap[atomIndex] = which_proc;
175 >      AtomToProcMap[atomIndex] = which_proc;
176 >      atomIndex++;
177      }
178      done = 1;
179      continue;

194      MolToProcMap[i] = which_proc;
195      AtomsPerProc[which_proc] += add_atoms;
196      for (j = 0 ; j < add_atoms; j++ ) {
197 <      atomIndex++;
198 <      AtomToProcMap[atomIndex] = which_proc;
199 <    }
197 >      AtomToProcMap[atomIndex] = which_proc;
198 >      atomIndex++;
199 >    }
200      done = 1;
201      continue;
202    } else {

208
209      // Spray out this nonsense to all other processors:
210
211 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
211 >    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
212                            MPI_INT, 0);
213
214 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
214 >    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
215                            MPI_INT, 0);
216
217 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
217 >    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
218                            MPI_INT, 0);
219
220 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
220 >    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
221                            MPI_INT, 0);
222    } else {
223
224      // Listen to your marching orders from processor 0:
225
226 <    MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal,
226 >    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
227                            MPI_INT, 0);
228
229 <    MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal,
229 >    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
230                            MPI_INT, 0);
231
232 <    MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal,
232 >    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
233                            MPI_INT, 0);
234
235 <    MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors,
235 >    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
236                            MPI_INT, 0);
237    }
238
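Dropping the & from all eight Bcast calls (root and listener branches alike) is the substantive fix here: MolToProcMap, AtomToProcMap, MolComponentType, and AtomsPerProc are already int* buffers, so &MolToProcMap has type int** -- the address of the pointer itself, not of the data. The bug is easy to miss because for a stack array T a[N] the expressions a and &a yield the same address, while for a heap pointer they do not. A minimal sketch of the distinction, using the MPI-2 C++ bindings the diff itself uses (the function and variable names are illustrative only):

    #include <mpi.h>

    void bcast_sketch(int nMol) {
      int* molToProc = new int[nMol];

      // Correct: molToProc already points at the data buffer.
      MPI::COMM_WORLD.Bcast(molToProc, nMol, MPI_INT, 0);

      // Wrong: &molToProc is the address of the local pointer, so this
      // would broadcast/overwrite the pointer value, not the array:
      // MPI::COMM_WORLD.Bcast(&molToProc, nMol, MPI_INT, 0);

      delete[] molToProc;
    }
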
285      local_index = 0;
286      for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
287        if (AtomToProcMap[i] == mpiPlug->myNode) {
285 -        local_index++;
288          globalIndex[local_index] = i;
289 +        local_index++;
290        }
291      }
292
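Same class of off-by-one as the atomIndex fix: incrementing before the store left globalIndex[0] unset and pushed the final entry one slot past the count of local atoms. With the store first, globalIndex[0 .. nLocal-1] holds exactly the global indices of this node's atoms, and local_index ends up equal to that count. A standalone sketch of the corrected loop; the function name and signature are illustrative:

    // Collect the global indices of atoms mapped to this node.
    // Returns the number of local atoms.
    int buildGlobalIndex(const int* AtomToProcMap, int nAtomsGlobal,
                         int myNode, int* globalIndex) {
      int local_index = 0;                // next free slot, 0-based
      for (int i = 0; i < nAtomsGlobal; i++) {
        if (AtomToProcMap[i] == myNode) {
          globalIndex[local_index] = i;   // store first...
          local_index++;                  // ...then advance
        }
      }
      return local_index;
    }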