Line | Change | Content
---|---|---
# | | Line 1 (old) / Line 1 (new)
1 | | #ifdef IS_MPI
2 | | #include <iostream>
3 | < | #include <cstdlib>
4 | < | #include <cstring>
5 | < | #include <cmath>
3 | > | #include <stdlib.h>
4 | > | #include <string.h>
5 | > | #include <math.h>
6 | | #include <mpi.h>
7 | – | #include <mpi++.h>
7 | |
8 | | #include "mpiSimulation.hpp"
9 | | #include "simError.h"
10 | | #include "fortranWrappers.hpp"
11 | | #include "randomSPRNG.hpp"
12 | |
14 | – | #define BASE_SEED 123456789
15 | – |
13 | | mpiSimulation* mpiSim;
14 | |
15 | | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
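
This hunk retires the C++ MPI bindings: `<mpi++.h>` is dropped, the `<cstdlib>`-family headers revert to their plain C forms, and (in the hunks below) every `MPI::COMM_WORLD` method call becomes the corresponding `MPI_*` C function with an explicit `MPI_COMM_WORLD` argument. A minimal, self-contained sketch of the C-style usage the file standardizes on (a hypothetical standalone program, not part of `mpiSimulation.cpp`):

```cpp
#include <stdio.h>
#include <mpi.h>

int main(int argc, char* argv[]) {
  int rank, size;
  MPI_Init(&argc, &argv);                // C API; no MPI:: namespace
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);  // replaces MPI::COMM_WORLD.Get_rank()
  MPI_Comm_size(MPI_COMM_WORLD, &size);  // replaces MPI::COMM_WORLD.Get_size()
  printf("rank %d of %d\n", rank, size);
  MPI_Finalize();
  return 0;
}
```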

Line | Change | Content
---|---|---
# | | Line 20 (old) / Line 17 (new): mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
17 | | entryPlug = the_entryPlug;
18 | | mpiPlug = new mpiSimData;
19 | |
20 | < | mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
20 | > | MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
21 | | mpiPlug->myNode = worldRank;
22 | |
23 | | MolToProcMap = new int[entryPlug->n_mol];
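
The constructor shows the basic shape of the rewrite: the C++ binding returned the communicator size, while the C function reports it through an output pointer and returns an error code instead. A minimal sketch; `queryWorldSize` is a hypothetical helper, and `mpiSimData` is reduced here to the one field this hunk touches:

```cpp
#include <mpi.h>

struct mpiSimData {
  int numberProcessors;  // stand-in; the real struct has many more members
};

// Assumes MPI_Init has already been called.
void queryWorldSize(mpiSimData* mpiPlug) {
  // Old: mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  // New: the result comes back through a pointer; the return value is
  // an error code, ignored here just as it is in the diff.
  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors));
}
```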

Line | Change | Content
---|---|---
# | | Line 60 (old) / Line 57 (new): int* mpiSimulation::divideLabor( void ){
57 | | int old_atoms, add_atoms, new_atoms;
58 | |
59 | | int nTarget;
60 | < | int molIndex, atomIndex, compIndex, compStart;
60 | > | int molIndex, atomIndex;
61 | | int done;
65 | – | int nLocal, molLocal;
62 | | int i, j, loops, which_proc, nmol_local, natoms_local;
63 | | int nmol_global, natoms_global;
64 | < | int local_index, index;
65 | < | int smallDiff, bigDiff;
70 | < | int baseSeed = BASE_SEED;
64 | > | int local_index;
65 | > | int baseSeed = entryPlug->getSeed();
66 | |
72 | – | int testSum;
73 | – |
67 | | nComponents = entryPlug->nComponents;
68 | | compStamps = entryPlug->compStamps;
69 | | componentsNmol = entryPlug->componentsNmol;
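
Besides pruning unused locals, this hunk (together with the removal of `#define BASE_SEED 123456789` above) moves the RNG seed from a compile-time constant to a run-time value supplied by the simulation, so the seed can change without rebuilding. A hedged sketch of the resulting plumbing; `SimInfo`, `randomSPRNG`, and `seedSetup` are minimal stand-ins, not the real class definitions:

```cpp
class SimInfo {
public:
  explicit SimInfo(int s) : seed(s) {}
  int getSeed() const { return seed; }  // run-time value, was a #define
private:
  int seed;
};

class randomSPRNG {
public:
  explicit randomSPRNG(int baseSeed) : base(baseSeed) {}  // seeds the streams
private:
  int base;
};

void seedSetup(SimInfo* entryPlug) {    // hypothetical helper
  int baseSeed = entryPlug->getSeed();  // was: int baseSeed = BASE_SEED;
  randomSPRNG* myRandom = new randomSPRNG(baseSeed);
  delete myRandom;                      // sketch only; the real object persists
}
```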

Line | Change | Content
---|---|---
# | | Line 83 (old) / Line 76 (new): int* mpiSimulation::divideLabor( void ){
76 | | mpiPlug->nSRIGlobal = entryPlug->n_SRI;
77 | | mpiPlug->nMolGlobal = entryPlug->n_mol;
78 | |
79 | + |
80 | | myRandom = new randomSPRNG( baseSeed );
81 | |
82 | | a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

Line | Change | Content
---|---|---
# | | Line 133 (old) / Line 127 (new): int* mpiSimulation::divideLabor( void ){
127 | | // How many atoms does this processor have?
128 | |
129 | | old_atoms = AtomsPerProc[which_proc];
130 | < | add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
130 | > | add_atoms = compStamps[MolComponentType[i]]->getTotAtoms();
131 | | new_atoms = old_atoms + add_atoms;
132 | |
133 | | // If we've been through this loop too many times, we need
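
The load balancer now asks the component stamp for `getTotAtoms()` rather than `getNAtoms()`; assuming the two differ when a stamp nests atoms inside sub-structures, the old call would undercount the load a molecule adds to a processor. A hedged sketch of the placement step this line belongs to, with `rand()` standing in for the `randomSPRNG` stream, `proposePlacement` a hypothetical helper, and the acceptance test simplified:

```cpp
#include <stdlib.h>

// Pick a candidate processor, compute the load it would carry if it
// received this molecule, and accept or reject the placement.
int proposePlacement(int nProcs, const int* AtomsPerProc,
                     int add_atoms, int nTarget) {
  int which_proc = rand() % nProcs;           // candidate processor
  int old_atoms  = AtomsPerProc[which_proc];  // its current load
  int new_atoms  = old_atoms + add_atoms;     // load if the molecule lands here
  // Reject candidates that would exceed the per-processor target; the
  // real loop retries, with a counter as a safety valve.
  return (new_atoms <= nTarget) ? which_proc : -1;
}
```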

Line | Change | Content
---|---|---
# | | Line 203 (old) / Line 197 (new): int* mpiSimulation::divideLabor( void ){
197 | |
198 | | // Spray out this nonsense to all other processors:
199 | |
200 | < | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
201 | < | MPI_INT, 0);
200 | > | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
201 | > | MPI_INT, 0, MPI_COMM_WORLD);
202 | |
203 | < | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
204 | < | MPI_INT, 0);
203 | > | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
204 | > | MPI_INT, 0, MPI_COMM_WORLD);
205 | |
206 | < | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
207 | < | MPI_INT, 0);
206 | > | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
207 | > | MPI_INT, 0, MPI_COMM_WORLD);
208 | |
209 | < | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
210 | < | MPI_INT, 0);
209 | > | MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
210 | > | MPI_INT, 0, MPI_COMM_WORLD);
211 | | } else {
212 | |
213 | | // Listen to your marching orders from processor 0:
214 | |
215 | < | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
216 | < | MPI_INT, 0);
215 | > | MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
216 | > | MPI_INT, 0, MPI_COMM_WORLD);
217 | |
218 | < | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
219 | < | MPI_INT, 0);
218 | > | MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
219 | > | MPI_INT, 0, MPI_COMM_WORLD);
220 | |
221 | < | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
222 | < | MPI_INT, 0);
221 | > | MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
222 | > | MPI_INT, 0, MPI_COMM_WORLD);
223 | |
224 | < | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
225 | < | MPI_INT, 0);
224 | > | MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
225 | > | MPI_INT, 0, MPI_COMM_WORLD);
226 | |
227 | |
228 | | }
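
Each broadcast keeps its buffer, count, datatype, and root arguments; the communicator simply moves from the invoking `MPI::COMM_WORLD` object to a trailing `MPI_COMM_WORLD` argument. Since `MPI_Bcast` is symmetric (the root sends and everyone else receives through the same call), the two branches above end up issuing identical calls. A standalone sketch of the pattern:

```cpp
#include <stdio.h>
#include <mpi.h>

int main(int argc, char* argv[]) {
  int rank, map[4];
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0)
    for (int i = 0; i < 4; i++) map[i] = i * i;  // root prepares the data
  // Identical call on every rank; the communicator now comes last.
  MPI_Bcast(map, 4, MPI_INT, 0, MPI_COMM_WORLD);
  printf("rank %d sees map[3] = %d\n", rank, map[3]);
  MPI_Finalize();
  return 0;
}
```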

Line | Change | Content
---|---|---
# | | Line 250 (old) / Line 244 (new): int* mpiSimulation::divideLabor( void ){
244 | | }
245 | | }
246 | |
247 | < | MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
248 | < | MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
247 | > | MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
248 | > | MPI_COMM_WORLD);
249 | > | MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
250 | > | MPI_SUM, MPI_COMM_WORLD);
251 | |
252 | | if( nmol_global != entryPlug->n_mol ){
253 | | sprintf( painCave.errMsg,
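
The reductions get the same mechanical treatment: `MPI_Allreduce` takes separate send and receive buffers, a count, a datatype, the reduction op, and the communicator last. Here they double-check that the per-processor molecule and atom counts sum back to the global totals. A standalone sketch of that sanity check (the molecule count of 12 is made up):

```cpp
#include <stdio.h>
#include <mpi.h>

int main(int argc, char* argv[]) {
  int rank, size, nmol_local, nmol_global;
  const int n_mol = 12;  // hypothetical global molecule count
  MPI_Init(&argc, &argv);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  // Deal the molecules out round-robin, then sum the local counts back up.
  nmol_local = n_mol / size + (rank < n_mol % size ? 1 : 0);
  MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  if (rank == 0 && nmol_global != n_mol)
    printf("lost molecules: %d != %d\n", nmol_global, n_mol);
  MPI_Finalize();
  return 0;
}
```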

Line | Change | Content
---|---|---
# | | Line 296 (old) / Line 292 (new): void mpiSimulation::mpiRefresh( void ){
292 | | int isError, i;
293 | | int *globalIndex = new int[mpiPlug->myNlocal];
294 | |
295 | < | for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
295 | > | // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
296 | > | // not barf
297 | |
298 | + | for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
299 | + |
300 | |
301 | | isError = 0;
302 | | setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
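
The refresh now bumps every global atom index by one before handing the array across the language boundary: C++ counts from 0, Fortran from 1, so the shift keeps both sides addressing the same atoms. A minimal sketch of the conversion (`fillFortranIndices` is a hypothetical helper):

```cpp
// Shift 0-based C++ indices to the 1-based convention Fortran expects.
void fillFortranIndices(const int* cIndices, int* fortranIndices, int n) {
  for (int i = 0; i < n; i++)
    fortranIndices[i] = cIndices[i] + 1;  // 0-based -> 1-based
}
```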

Legend: – removed lines, + added lines, < changed lines (old version), > changed lines (new version)