```diff
@@ -1,18 +1,15 @@
 #ifdef IS_MPI
 #include <iostream>
-#include <cstdlib>
-#include <cstring>
-#include <cmath>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
 #include <mpi.h>
-#include <mpi++.h>

 #include "mpiSimulation.hpp"
 #include "simError.h"
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"

-#define BASE_SEED 123456789
-
 mpiSimulation* mpiSim;

 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
@@ -20,7 +17,7 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;

-  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
   mpiPlug->myNode = worldRank;

   MolToProcMap = new int[entryPlug->n_mol];
```
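Throughout this revision the C++ MPI bindings are translated to the C API: member calls on `MPI::COMM_WORLD` become free functions that take the communicator as an explicit argument, and values formerly returned by the call come back through pointers. A minimal standalone sketch of the pattern (hypothetical example, not part of mpiSimulation.cpp):

```cpp
#include <mpi.h>
#include <stdio.h>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int size, rank;
  // C++ binding was: size = MPI::COMM_WORLD.Get_size();
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  // C++ binding was: rank = MPI::COMM_WORLD.Get_rank();
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  printf("node %d of %d\n", rank, size);

  MPI_Finalize();
  return 0;
}
```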
```diff
@@ -43,10 +40,8 @@ mpiSimulation::~mpiSimulation(){

 }

-int* mpiSimulation::divideLabor( void ){
+void mpiSimulation::divideLabor( ){

-  int* globalIndex;
-
   int nComponents;
   MoleculeStamp** compStamps;
   randomSPRNG *myRandom;
@@ -60,17 +55,13 @@ int* mpiSimulation::divideLabor( void ){
   int old_atoms, add_atoms, new_atoms;

   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex;
   int done;
-  int nLocal, molLocal;
   int i, j, loops, which_proc, nmol_local, natoms_local;
   int nmol_global, natoms_global;
-  int local_index, index;
-  int smallDiff, bigDiff;
-  int baseSeed = BASE_SEED;
+  int local_index;
+  int baseSeed = entryPlug->getSeed();

-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
@@ -203,32 +194,32 @@ int* mpiSimulation::divideLabor( void ){

     // Spray out this nonsense to all other processors:

-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);
   } else {

     // Listen to your marching orders from processor 0:

-    MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolToProcMap, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal,
-                          MPI_INT, 0);
+    MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
+              MPI_INT, 0, MPI_COMM_WORLD);

-    MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors,
-                          MPI_INT, 0);
+    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+              MPI_INT, 0, MPI_COMM_WORLD);


   }
```
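The broadcasts follow the same translation rule: the communicator moves from the invoking object to the final argument, while buffer, count, datatype, and root rank keep their order. Since `MPI_Bcast` is collective and symmetric, the call is written identically on the root and the listeners; the if/else above only distinguishes which rank computed the maps beforehand. A hedged sketch of the call shape (hypothetical helper, not in the source):

```cpp
#include <mpi.h>

// Every rank makes the identical call; rank `root` sends its buffer,
// all other ranks overwrite theirs with the received copy.
void broadcastMap(int* map, int count, int root) {
  // C++ binding was: MPI::COMM_WORLD.Bcast(map, count, MPI_INT, root);
  MPI_Bcast(map, count, MPI_INT, root, MPI_COMM_WORLD);
}
```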
```diff
@@ -250,8 +241,10 @@ int* mpiSimulation::divideLabor( void ){
     }
   }

-  MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
-  MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
+  MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM,
+                MPI_COMM_WORLD);
+  MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT,
+                MPI_SUM, MPI_COMM_WORLD);

   if( nmol_global != entryPlug->n_mol ){
     sprintf( painCave.errMsg,
```
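The sanity check that follows sums the per-node molecule and atom tallies; `MPI_Allreduce` with `MPI_SUM` deposits the global total on every rank, so each node can independently compare it against the totals the entry plug expects. A minimal sketch of the reduction (hypothetical helper, not in the source):

```cpp
#include <mpi.h>

// Sum one int across all ranks; every rank receives the same total.
int globalSum(int localCount) {
  int globalCount;
  MPI_Allreduce(&localCount, &globalCount, 1, MPI_INT, MPI_SUM,
                MPI_COMM_WORLD);
  return globalCount;
}
```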
```diff
@@ -278,15 +271,31 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->myNMol = nmol_local;
   mpiPlug->myNlocal = natoms_local;

-  globalIndex = new int[mpiPlug->myNlocal];
+  globalAtomIndex.resize(mpiPlug->myNlocal);
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      globalIndex[local_index] = i;
+      globalAtomIndex[local_index] = i;
+
+      globalToLocalAtom[i] = local_index;
       local_index++;
+
     }
+    else
+      globalToLocalAtom[i] = -1;
   }
+
+  globalMolIndex.resize(mpiPlug->myNMol);
+  local_index = 0;
+  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
+    if (MolToProcMap[i] == mpiPlug->myNode) {
+      globalMolIndex[local_index] = i;
+      globalToLocalMol[i] = local_index;
+      local_index++;
+    }
+    else
+      globalToLocalMol[i] = -1;
+  }

-  return globalIndex;
 }

```
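With divideLabor() now returning void, the bookkeeping lives in member containers instead of a caller-owned array: globalAtomIndex and globalMolIndex map a local slot to its global ID, while globalToLocalAtom and globalToLocalMol invert the mapping, storing -1 for entities owned by another node. A minimal sketch of the same two-way pattern (assuming std::vector storage; the actual member types are declared in mpiSimulation.hpp):

```cpp
#include <vector>

// Hypothetical reconstruction of the two-way index maps built above.
void buildIndexMaps(const std::vector<int>& ownerOf, int myNode,
                    std::vector<int>& localToGlobal,
                    std::vector<int>& globalToLocal) {
  localToGlobal.clear();
  globalToLocal.assign(ownerOf.size(), -1);   // -1: owned by another node
  for (int i = 0; i < (int)ownerOf.size(); i++) {
    if (ownerOf[i] == myNode) {
      globalToLocal[i] = (int)localToGlobal.size();
      localToGlobal.push_back(i);
    }
  }
}
```

Keeping both directions makes ownership lookups O(1) either way, at the cost of one int per global entity on each node.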