  4       #include <cstring>
  5       #include <cmath>
  6       #include <mpi.h>
  7  <    #include <mpi++.h>
  7  >
  8       #include "mpiSimulation.hpp"
  9       #include "simError.h"
 19       entryPlug = the_entryPlug;
 20       mpiPlug = new mpiSimData;
 21
 22  <    mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
 22  >    MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
 23       mpiPlug->myNode = worldRank;
 24
 25       MolToProcMap = new int[entryPlug->n_mol];
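Note the calling-convention change above: the C++ MPI::COMM_WORLD.Get_size() returns the size directly, while C's MPI_Comm_size writes it through an output pointer and returns an error code. A sketch of the equivalent size/rank queries in the C binding (variable names are illustrative, not from this file):

    int nProcs, myRank;
    MPI_Comm_size(MPI_COMM_WORLD, &nProcs);   /* ranks in the communicator */
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);   /* this process, 0 .. nProcs-1 */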
202
203       // Spray out this nonsense to all other processors:
204
205  <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
206  <                          MPI_INT, 0);
205  >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
206  >              MPI_INT, 0, MPI_COMM_WORLD);
207
208  <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
209  <                          MPI_INT, 0);
208  >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
209  >              MPI_INT, 0, MPI_COMM_WORLD);
210
211  <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
212  <                          MPI_INT, 0);
211  >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
212  >              MPI_INT, 0, MPI_COMM_WORLD);
213
214  <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
215  <                          MPI_INT, 0);
214  >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
215  >              MPI_INT, 0, MPI_COMM_WORLD);
216       } else {
217
218       // Listen to your marching orders from processor 0:
219
220  <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
221  <                          MPI_INT, 0);
222  <
223  <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
224  <                          MPI_INT, 0);
220  >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
221  >              MPI_INT, 0, MPI_COMM_WORLD);
222  >
223  >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
224  >              MPI_INT, 0, MPI_COMM_WORLD);
225
226  <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
227  <                          MPI_INT, 0);
226  >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
227  >              MPI_INT, 0, MPI_COMM_WORLD);
228
229  <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
230  <                          MPI_INT, 0);
229  >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
230  >              MPI_INT, 0, MPI_COMM_WORLD);
231
232
233       }
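MPI_Bcast is a collective call: the root and the receivers issue textually identical calls, and the root argument (0 here) selects whose buffer is the source. That is why both branches of this if/else carry the same MPI_Bcast lines; only the comments differ. The C binding also moves the communicator from the MPI::COMM_WORLD object to the final argument. A reduced sketch of the pattern (buffer name and size are illustrative):

    const int N = 16;
    int map[N];
    if (myRank == 0) { /* root fills map[] */ }
    MPI_Bcast(map, N, MPI_INT, 0, MPI_COMM_WORLD);   /* same call on every rank */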
249       }
250       }
251
252  <    MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
253  <    MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
252  >    MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
253  >                  MPI_COMM_WORLD);
254  >    MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
255  >                  MPI_SUM, MPI_COMM_WORLD);
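MPI_Allreduce combines one value per rank (with MPI_SUM here) and deposits the result on every rank, so each process can run the consistency check below without further communication. As with MPI_Bcast, the C binding takes the communicator as its last argument. A sketch of the summation pattern (names and values are illustrative):

    int nLocal  = 5;   /* this rank's contribution */
    int nGlobal = 0;
    MPI_Allreduce(&nLocal, &nGlobal, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);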
256
257       if( nmol_global != entryPlug->n_mol ){
258       sprintf( painCave.errMsg,
297       int isError, i;
298       int *globalIndex = new int[mpiPlug->myNlocal];
299
300  <    for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
300  >    // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
301  >    // not barf
302
303  >    for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
304  >
305
306       isError = 0;
307       setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
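The added +1 converts the 0-based C++ global indices to the 1-based convention the Fortran side of setFsimParallel expects. A minimal sketch of the convention shift (values are illustrative):

    int cIndex = 41;           /* a 0-based C++ global index */
    int fIndex = cIndex + 1;   /* the same element, 1-based, as Fortran expects */
    int back   = fIndex - 1;   /* convert again when indices return from Fortran */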