  4    #include <cstring>
  5    #include <cmath>
  6    #include <mpi.h>
  7 -  #include <mpi++.h>
  7
  8    #include "mpiSimulation.hpp"
  9    #include "simError.h"
 10    #include "fortranWrappers.hpp"
 11    #include "randomSPRNG.hpp"
 12
 14 -  #define BASE_SEED 123456789
 15 -
 13    mpiSimulation* mpiSim;
 14
 15    mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
 17      entryPlug = the_entryPlug;
 18      mpiPlug = new mpiSimData;
 19
 20 <    mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
 20 >    MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
 21      mpiPlug->myNode = worldRank;
 22
 23      MolToProcMap = new int[entryPlug->n_mol];
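For orientation, the hunk above swaps the MPI C++ bindings for the plain C bindings. A minimal, self-contained sketch of the equivalent C-binding calls follows; the variable names and the MPI_Comm_rank call are illustrative here, not taken from the file itself:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);

      int numberProcessors, worldRank;
      // The C bindings return results through output pointers plus an int error
      // code, instead of the MPI::COMM_WORLD.Get_size() / Get_rank() return values.
      MPI_Comm_size(MPI_COMM_WORLD, &numberProcessors);
      MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

      MPI_Finalize();
      return 0;
    }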
 64      int nmol_global, natoms_global;
 65      int local_index, index;
 66      int smallDiff, bigDiff;
 67 <    int baseSeed = BASE_SEED;
 67 >    int baseSeed = entryPlug->getSeed();
 68
 69      int testSum;
 70
 80      mpiPlug->nSRIGlobal = entryPlug->n_SRI;
 81      mpiPlug->nMolGlobal = entryPlug->n_mol;
 82
 83 +
 84      myRandom = new randomSPRNG( baseSeed );
 85
 86      a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
201
202      // Spray out this nonsense to all other processors:
203
204 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
205 <                          MPI_INT, 0);
204 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
205 >              MPI_INT, 0, MPI_COMM_WORLD);
206
207 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
208 <                          MPI_INT, 0);
207 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
208 >              MPI_INT, 0, MPI_COMM_WORLD);
209
210 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
211 <                          MPI_INT, 0);
210 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
211 >              MPI_INT, 0, MPI_COMM_WORLD);
212
213 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
214 <                          MPI_INT, 0);
213 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
214 >              MPI_INT, 0, MPI_COMM_WORLD);
215    } else {
216
217      // Listen to your marching orders from processor 0:
218
219 <    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
220 <                          MPI_INT, 0);
219 >    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
220 >              MPI_INT, 0, MPI_COMM_WORLD);
221
222 <    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
223 <                          MPI_INT, 0);
222 >    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
223 >              MPI_INT, 0, MPI_COMM_WORLD);
224
225 <    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
226 <                          MPI_INT, 0);
225 >    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
226 >              MPI_INT, 0, MPI_COMM_WORLD);
227
228 <    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
229 <                          MPI_INT, 0);
228 >    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
229 >              MPI_INT, 0, MPI_COMM_WORLD);
230
231
232    }
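All of the broadcast hunks above follow the same pattern: the root rank and the communicator move into the argument list of MPI_Bcast. A minimal sketch of that call; the buffer name is borrowed from the diff, but the buffer size and root rank 0 are placeholders:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);

      int MolToProcMap[8] = {0};  // placeholder buffer; rank 0's contents are sent
      // C binding: MPI_Bcast(buffer, count, datatype, root, communicator);
      // every rank, including the root, makes the identical call.
      MPI_Bcast(MolToProcMap, 8, MPI_INT, 0, MPI_COMM_WORLD);

      MPI_Finalize();
      return 0;
    }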
248    }
249  }
250
251 <  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
252 <  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
251 >  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
252 >                MPI_COMM_WORLD);
253 >  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
254 >                MPI_SUM, MPI_COMM_WORLD);
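The reductions above change the same way: the communicator becomes the final argument of MPI_Allreduce. A minimal sketch with placeholder local counts rather than the simulation's real ones:

    #include <mpi.h>

    int main(int argc, char* argv[]) {
      MPI_Init(&argc, &argv);

      int nmol_local = 1;    // placeholder per-rank count
      int nmol_global = 0;
      // C binding: MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);
      // every rank receives the summed result.
      MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);

      MPI_Finalize();
      return 0;
    }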
255
256    if( nmol_global != entryPlug->n_mol ){
257      sprintf( painCave.errMsg,