```diff
@@ old line 1 / new line 1 @@
 #ifdef IS_MPI
 #include <iostream>
-#include <cstdlib>
-#include <cstring>
-#include <cmath>
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
 #include <mpi.h>

 #include "mpiSimulation.hpp"
@@ old line 10 / new line 10 @@
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"

-#define BASE_SEED 123456789
-
 mpiSimulation* mpiSim;

 mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
@@ old line 19 / new line 17 @@ mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
   entryPlug = the_entryPlug;
   mpiPlug = new mpiSimData;

-  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->numberProcessors) );
+  MPI_Comm_size(MPI_COMM_WORLD, &(mpiPlug->nProcessors) );
   mpiPlug->myNode = worldRank;

   MolToProcMap = new int[entryPlug->n_mol];
```
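The constructor hunk above only renames the field that holds the communicator size (numberProcessors to nProcessors). For readers less familiar with MPI, here is a minimal, self-contained sketch of the same size/rank bookkeeping; the ParallelInfo struct and its fields are illustrative stand-ins, not the actual mpiSimData type from this codebase.

```cpp
// Minimal sketch of the MPI size/rank query used in the constructor above.
// ParallelInfo and its fields are illustrative, not the real mpiSimData.
#include <mpi.h>
#include <cstdio>

struct ParallelInfo {
  int nProcessors;  // ranks in MPI_COMM_WORLD (cf. mpiPlug->nProcessors)
  int myNode;       // this rank's id (cf. mpiPlug->myNode / worldRank)
};

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  ParallelInfo info;
  MPI_Comm_size(MPI_COMM_WORLD, &info.nProcessors);
  MPI_Comm_rank(MPI_COMM_WORLD, &info.myNode);

  std::printf("rank %d of %d\n", info.myNode, info.nProcessors);

  MPI_Finalize();
  return 0;
}
```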
```diff
@@ old line 42 / new line 40 @@ mpiSimulation::~mpiSimulation(){

 }

-int* mpiSimulation::divideLabor( void ){
+void mpiSimulation::divideLabor( ){

-  int* globalIndex;
-
   int nComponents;
   MoleculeStamp** compStamps;
   randomSPRNG *myRandom;
@@ old line 59 / new line 55 @@ int* mpiSimulation::divideLabor( void ){
   int old_atoms, add_atoms, new_atoms;

   int nTarget;
-  int molIndex, atomIndex, compIndex, compStart;
+  int molIndex, atomIndex;
   int done;
-  int nLocal, molLocal;
   int i, j, loops, which_proc, nmol_local, natoms_local;
   int nmol_global, natoms_global;
-  int local_index, index;
-  int smallDiff, bigDiff;
+  int local_index;
   int baseSeed = entryPlug->getSeed();

-  int testSum;
-
   nComponents = entryPlug->nComponents;
   compStamps = entryPlug->compStamps;
   componentsNmol = entryPlug->componentsNmol;
-  AtomsPerProc = new int[mpiPlug->numberProcessors];
+  AtomsPerProc = new int[mpiPlug->nProcessors];

   mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
   mpiPlug->nBondsGlobal = entryPlug->n_bonds;
```
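The signature change in the hunk above drops the int* return value of divideLabor(); together with the resize() calls in the last hunk of this diff, the raw new[]'d index array gives way to what appear to be container members that manage their own storage. The sketch below contrasts the two ownership styles with generic names; it illustrates the pattern only and is not code from mpiSimulation.

```cpp
// Sketch of the ownership change implied by this diff: the old code handed
// back a raw new[]'d array that the caller had to delete[], the new code
// fills a std::vector that cleans up after itself.  Names here are generic.
#include <vector>
#include <cstdio>

// old-style: caller owns the returned buffer
int* buildIndexOld(int n) {
  int* idx = new int[n];
  for (int i = 0; i < n; i++) idx[i] = i;
  return idx;                 // caller must remember to delete[]
}

// new-style: the container manages its own storage
void buildIndexNew(std::vector<int>& idx, int n) {
  idx.resize(n);
  for (int i = 0; i < n; i++) idx[i] = i;
}

int main() {
  int* a = buildIndexOld(4);
  std::vector<int> b;
  buildIndexNew(b, 4);
  std::printf("%d %d\n", a[3], b[3]);
  delete[] a;                 // easy to forget; the vector needs nothing
  return 0;
}
```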
```diff
@@ old line 81 / new line 73 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
+  mpiPlug->nGroupsGlobal = entryPlug->ngroup;

-
   myRandom = new randomSPRNG( baseSeed );

   a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;

   // Initialize things that we'll send out later:
-  for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
+  for (i = 0; i < mpiPlug->nProcessors; i++ ) {
     AtomsPerProc[i] = 0;
   }
   for (i = 0; i < mpiPlug->nMolGlobal; i++ ) {
@@ old line 103 / new line 95 @@ int* mpiSimulation::divideLabor( void ){

   if (mpiPlug->myNode == 0) {
     numerator = (double) entryPlug->n_atoms;
-    denominator = (double) mpiPlug->numberProcessors;
+    denominator = (double) mpiPlug->nProcessors;
     precast = numerator / denominator;
     nTarget = (int)( precast + 0.5 );

@@ old line 128 / new line 120 @@ int* mpiSimulation::divideLabor( void ){

       // Pick a processor at random

-      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->nProcessors);

       // How many atoms does this processor have?

```
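The hunks above set nTarget to the ideal atoms-per-processor count and then pick a processor at random for each molecule. A simplified, self-contained sketch of that random assignment idea follows; std::mt19937 stands in for randomSPRNG, every molecule is given a fixed atom count, and the acceptance test is a deliberately crude stand-in for the real criterion built from a, nTarget, and AtomsPerProc.

```cpp
// Simplified sketch of the random molecule-to-processor assignment visible in
// divideLabor().  std::mt19937 replaces randomSPRNG, and "already at target"
// is a simplified stand-in for the code's actual acceptance test; the later
// broadcast of the result is omitted here.
#include <cstdio>
#include <random>
#include <vector>

int main() {
  const int nProcessors  = 4;
  const int nMolGlobal   = 20;
  const int atomsPerMol  = 3;                      // pretend every molecule has 3 atoms
  const int nAtomsGlobal = nMolGlobal * atomsPerMol;

  // nTarget: the ideal number of atoms per processor, as in the hunk above.
  int nTarget = (int)((double)nAtomsGlobal / (double)nProcessors + 0.5);

  std::vector<int> MolToProcMap(nMolGlobal, -1);
  std::vector<int> AtomsPerProc(nProcessors, 0);

  std::mt19937 gen(123456789);
  std::uniform_real_distribution<double> uniform(0.0, 1.0);

  for (int i = 0; i < nMolGlobal; i++) {
    int loops = 0;
    int done = 0;
    while (!done) {
      loops++;
      // Pick a processor at random.
      int which_proc = (int)(uniform(gen) * nProcessors);

      // Accept if that processor is still below the target (or if we have
      // been trying too long, so the loop always terminates).
      if (AtomsPerProc[which_proc] < nTarget || loops > 100) {
        MolToProcMap[i] = which_proc;
        AtomsPerProc[which_proc] += atomsPerMol;
        done = 1;
      }
    }
  }

  for (int p = 0; p < nProcessors; p++)
    std::printf("proc %d: %d atoms (target %d)\n", p, AtomsPerProc[p], nTarget);
  return 0;
}
```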
```diff
@@ old line 212 / new line 204 @@ int* mpiSimulation::divideLabor( void ){
     MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
               MPI_INT, 0, MPI_COMM_WORLD);

-    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
               MPI_INT, 0, MPI_COMM_WORLD);
   } else {

@@ old line 227 / new line 219 @@ int* mpiSimulation::divideLabor( void ){
     MPI_Bcast(MolComponentType, mpiPlug->nMolGlobal,
               MPI_INT, 0, MPI_COMM_WORLD);

-    MPI_Bcast(AtomsPerProc, mpiPlug->numberProcessors,
+    MPI_Bcast(AtomsPerProc, mpiPlug->nProcessors,
               MPI_INT, 0, MPI_COMM_WORLD);


```
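Both branches above issue the same MPI_Bcast calls, so rank 0's assignment arrays end up replicated on every rank. A minimal sketch of that pattern, with array names mirroring the diff but toy sizes and contents:

```cpp
// Sketch of the broadcast pattern in the two hunks above: rank 0 decides the
// molecule-to-processor assignment and every rank receives the same arrays.
// MPI_Bcast takes identical arguments on the root and on the other ranks;
// only the buffer contents on the root matter before the call.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int myNode, nProcessors;
  MPI_Comm_rank(MPI_COMM_WORLD, &myNode);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);

  const int nMolGlobal = 8;
  std::vector<int> MolToProcMap(nMolGlobal, 0);
  std::vector<int> AtomsPerProc(nProcessors, 0);

  if (myNode == 0) {
    // Root fills in the maps (trivially here; divideLabor does the real work).
    for (int i = 0; i < nMolGlobal; i++) MolToProcMap[i] = i % nProcessors;
    for (int i = 0; i < nMolGlobal; i++) AtomsPerProc[MolToProcMap[i]] += 3;
  }

  // Same calls on every rank, root and non-root alike.
  MPI_Bcast(MolToProcMap.data(), nMolGlobal, MPI_INT, 0, MPI_COMM_WORLD);
  MPI_Bcast(AtomsPerProc.data(), nProcessors, MPI_INT, 0, MPI_COMM_WORLD);

  std::printf("rank %d: molecule 0 lives on proc %d\n", myNode, MolToProcMap[0]);

  MPI_Finalize();
  return 0;
}
```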
```diff
@@ old line 277 / new line 269 @@ int* mpiSimulation::divideLabor( void ){
           "Successfully divided the molecules among the processors.\n" );
   MPIcheckPoint();

-  mpiPlug->myNMol = nmol_local;
-  mpiPlug->myNlocal = natoms_local;
+  mpiPlug->nMolLocal = nmol_local;
+  mpiPlug->nAtomsLocal = natoms_local;

-  globalIndex = new int[mpiPlug->myNlocal];
+  globalAtomIndex.resize(mpiPlug->nAtomsLocal);
+  globalToLocalAtom.resize(mpiPlug->nAtomsGlobal);
   local_index = 0;
   for (i = 0; i < mpiPlug->nAtomsGlobal; i++) {
     if (AtomToProcMap[i] == mpiPlug->myNode) {
-      globalIndex[local_index] = i;
+      globalAtomIndex[local_index] = i;
+      globalToLocalAtom[i] = local_index;
       local_index++;
     }
+    else
+      globalToLocalAtom[i] = -1;
   }
+
+  globalMolIndex.resize(mpiPlug->nMolLocal);
+  globalToLocalMol.resize(mpiPlug->nMolGlobal);

-  return globalIndex;
+  local_index = 0;
+  for (i = 0; i < mpiPlug->nMolGlobal; i++) {
+    if (MolToProcMap[i] == mpiPlug->myNode) {
+      globalMolIndex[local_index] = i;
+      globalToLocalMol[i] = local_index;
+      local_index++;
+    }
+    else
+      globalToLocalMol[i] = -1;
+  }
+
 }


 void mpiSimulation::mpiRefresh( void ){

   int isError, i;
-  int *globalIndex = new int[mpiPlug->myNlocal];
+  int *globalIndex = new int[mpiPlug->nAtomsLocal];

   // Fortran indexing needs to be increased by 1 in order to get the 2 languages to
   // not barf

-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+  for(i=0; i<mpiPlug->nAtomsLocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;


   isError = 0;
```
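The final hunk replaces the single returned globalIndex array with two pairs of maps: globalAtomIndex / globalToLocalAtom for atoms and globalMolIndex / globalToLocalMol for molecules, where the inverse map holds -1 for entries owned by another rank. A small stand-alone sketch of that two-way mapping, with toy AtomToProcMap and myNode values and container names following the diff:

```cpp
// Sketch of the two-way index maps built in the final hunk: a forward list of
// the global ids owned by this rank, plus an inverse table from global id to
// local index (-1 for ids that live on some other rank).  AtomToProcMap and
// myNode are filled with toy values for the example.
#include <cstdio>
#include <vector>

int main() {
  const int nAtomsGlobal = 6;
  const int myNode = 1;
  // Which processor owns each global atom (toy assignment).
  std::vector<int> AtomToProcMap = {0, 1, 1, 0, 1, 0};

  std::vector<int> globalAtomIndex;                      // local index -> global id
  std::vector<int> globalToLocalAtom(nAtomsGlobal, -1);  // global id  -> local index

  int local_index = 0;
  for (int i = 0; i < nAtomsGlobal; i++) {
    if (AtomToProcMap[i] == myNode) {
      globalAtomIndex.push_back(i);
      globalToLocalAtom[i] = local_index;
      local_index++;
    }
    // entries for non-local atoms stay at -1
  }

  for (int l = 0; l < (int)globalAtomIndex.size(); l++)
    std::printf("local %d -> global %d\n", l, globalAtomIndex[l]);
  for (int g = 0; g < nAtomsGlobal; g++)
    std::printf("global %d -> local %d\n", g, globalToLocalAtom[g]);
  return 0;
}
```

In the same hunk, mpiRefresh() keeps building a local globalIndex array but now sizes it with nAtomsLocal; the +1 in its loop converts the 0-based C indices to the 1-based convention the Fortran side expects.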