@@ -2,6 +2,7 @@
 
 #include <cstdlib>
 #include <cstring>
+#include <cmath>
 #include <mpi.h>
 #include <mpi++.h>
 
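A reading of this hunk (not stated in the diff itself): <cmath> is presumably added for the exp() call in the acceptance test of the Line 180/189 hunk below, so the file no longer depends on some other header dragging the math functions in. <mpi++.h> is the header for the MPI C++ bindings (MPI::COMM_WORLD) used in the broadcast hunks further down.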
@@ -10,6 +11,7 @@
 #include "fortranWrappers.hpp"
 #include "randomSPRNG.hpp"
 
+#define BASE_SEED 123456789
 
 mpiSimulation* mpiSim;
 
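Hard-coding BASE_SEED makes the randomized labor division reproducible from run to run. As a minimal sketch of the idea (illustrative only: it uses std::mt19937, since randomSPRNG's internals are not shown in this diff), each process can derive a distinct but reproducible stream from the one shared seed:

    #include <random>

    // One shared base seed, one distinct stream per MPI rank.
    // Offsetting by rank keeps the streams apart yet reproducible.
    std::mt19937 makeStream(int baseSeed, int rank) {
      return std::mt19937(static_cast<unsigned int>(baseSeed + rank));
    }

SPRNG itself manages distinct streams internally, which is presumably why randomSPRNG only needs the one seed argument.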
@@ -33,6 +35,10 @@ mpiSimulation::~mpiSimulation(){
 
 mpiSimulation::~mpiSimulation(){
 
+  delete[] MolToProcMap;
+  delete[] MolComponentType;
+  delete[] AtomToProcMap;
+
   delete mpiPlug;
   // perhaps we should let fortran know the party is over.
 
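The three maps are allocated with new[] during divideLabor(), so the destructor must release them with delete[]; plain delete on a new[]-ed array is undefined behavior, and omitting the release entirely (as before this change) leaked one allocation per map for the lifetime of the run. The pairing rule, as a minimal sketch:

    #include <cstddef>

    void sketch(std::size_t nMolGlobal) {
      int* map = new int[nMolGlobal];  // array form of new ...
      // ... fill and use map ...
      delete[] map;                    // ... must pair with delete[], not delete
    }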
@@ -44,7 +50,7 @@ int* mpiSimulation::divideLabor( void ){
 
   int nComponents;
   MoleculeStamp** compStamps;
-  randomSPRNG myRandom;
+  randomSPRNG *myRandom;
   int* componentsNmol;
   int* AtomsPerProc;
 
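Turning myRandom into a pointer defers construction until the seed is available, which is why every later use changes from `.` to `->` and the allocation in the Line 75/84 hunk can pass baseSeed. A sketch of the pattern (the final delete is my assumption; the diff does not show whether divideLabor() cleans the generator up):

    randomSPRNG* myRandom;                    // declared, not yet constructed
    // ... once baseSeed is known:
    myRandom = new randomSPRNG( baseSeed );
    // ... use myRandom->getRandom() ...
    delete myRandom;                          // assumed cleanup, not shown above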
@@ -58,8 +64,11 @@ int* mpiSimulation::divideLabor( void ){
   int molIndex, atomIndex, compIndex, compStart;
   int done;
   int nLocal, molLocal;
-  int i, index;
+  int i, j, loops, which_proc, nmol_local, natoms_local;
+  int nmol_global, natoms_global;
+  int local_index, index;
   int smallDiff, bigDiff;
+  int baseSeed = BASE_SEED;
 
   int testSum;
 
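The new *_local/*_global counters point at a consistency check after the assignment loop: sum each processor's molecule and atom counts and compare against the global totals. The check itself falls outside the hunks shown; a hypothetical shape for it (names mirror the new locals):

    #include <mpi.h>

    void checkMolCount(int nmol_local, int nMolExpected) {
      int nmol_global = 0;
      MPI_Allreduce(&nmol_local, &nmol_global, 1, MPI_INT, MPI_SUM,
                    MPI_COMM_WORLD);
      if (nmol_global != nMolExpected)
        MPI_Abort(MPI_COMM_WORLD, 1);  // every molecule must be placed exactly once
    }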
@@ -75,7 +84,7 @@ int* mpiSimulation::divideLabor( void ){
   mpiPlug->nSRIGlobal = entryPlug->n_SRI;
   mpiPlug->nMolGlobal = entryPlug->n_mol;
 
-  myRandom = new randomSPRNG();
+  myRandom = new randomSPRNG( baseSeed );
 
   a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
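Two things happen here: the generator is finally constructed with the shared seed, and a is set to molecules per atom, i.e. 1 / (average atoms per molecule), exactly as the comment in the Line 180/189 hunk describes. For example, 1000 molecules of 3 atoms each give a = 1000/3000 = 0.333, so the acceptance probability exp(-a*x) used below decays over a scale of roughly one average molecule's worth of overshoot.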
@@ -120,7 +129,7 @@ int* mpiSimulation::divideLabor( void ){
 
       // Pick a processor at random
 
-      which_proc = (int) (myRandom.getRandom() * mpiPlug->numberProcessors);
+      which_proc = (int) (myRandom->getRandom() * mpiPlug->numberProcessors);
 
       // How many atoms does this processor have?
 
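Assuming getRandom() returns a uniform double in [0, 1), as SPRNG-style generators typically do, the cast maps it onto a processor index in [0, numberProcessors - 1]:

    // Hypothetical helper showing the mapping used above.
    int pickProcessor(double u /* uniform in [0,1) */, int numberProcessors) {
      // u * numberProcessors lies in [0, numberProcessors), so the
      // truncating cast yields 0 .. numberProcessors - 1. A generator
      // that could return exactly 1.0 would index past the end.
      return (int)(u * numberProcessors);
    }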
@@ -131,7 +140,7 @@ int* mpiSimulation::divideLabor( void ){
 
       if (old_atoms >= nTarget) continue;
 
-      add_atoms = compStamps[MolComponentType[i]]->getNatoms();
+      add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
 
       // If we can add this molecule to this processor without sending
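The getNatoms() to getNAtoms() change reads as a capitalization fix to match MoleculeStamp's actual accessor; C++ member names are case-sensitive, so the old spelling would not compile against a header declaring only getNAtoms().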
@@ -180,7 +189,7 @@ int* mpiSimulation::divideLabor( void ){
       // where a = 1 / (average atoms per molecule)
 
       x = (double) (new_atoms - nTarget);
-      y = myRandom.getRandom();
+      y = myRandom->getRandom();
 
       if (exp(- a * x) > y) {
         MolToProcMap[i] = which_proc;
209 | ||
210 | // Spray out this nonsense to all other processors: | |
211 | ||
212 | < | MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal, |
212 | > | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
213 | MPI_INT, 0); | |
214 | ||
215 | < | MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal, |
215 | > | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
216 | MPI_INT, 0); | |
217 | ||
218 | < | MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal, |
218 | > | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
219 | MPI_INT, 0); | |
220 | ||
221 | < | MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors, |
221 | > | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
222 | MPI_INT, 0); | |
223 | } else { | |
224 | ||
225 | // Listen to your marching orders from processor 0: | |
226 | ||
227 | < | MPI::COMM_WORLD.Bcast(&MolToProcMap, mpiPlug->nMolGlobal, |
227 | > | MPI::COMM_WORLD.Bcast(MolToProcMap, mpiPlug->nMolGlobal, |
228 | MPI_INT, 0); | |
229 | ||
230 | < | MPI::COMM_WORLD.Bcast(&AtomToProcMap, mpiPlug->nAtomsGlobal, |
230 | > | MPI::COMM_WORLD.Bcast(AtomToProcMap, mpiPlug->nAtomsGlobal, |
231 | MPI_INT, 0); | |
232 | ||
233 | < | MPI::COMM_WORLD.Bcast(&MolComponentType, mpiPlug->nMolGlobal, |
233 | > | MPI::COMM_WORLD.Bcast(MolComponentType, mpiPlug->nMolGlobal, |
234 | MPI_INT, 0); | |
235 | ||
236 | < | MPI::COMM_WORLD.Bcast(&AtomsPerProc, mpiPlug->numberProcessors, |
236 | > | MPI::COMM_WORLD.Bcast(AtomsPerProc, mpiPlug->numberProcessors, |
237 | MPI_INT, 0); | |
238 | } | |
239 | ||
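Dropping the & is the substantive fix in this hunk: MolToProcMap and the other maps are already int*, so the bare name is the buffer address MPI needs, whereas &MolToProcMap is the address of the pointer variable itself, and the broadcast would ship (and on receivers, clobber) the pointer's own bytes. The & form remains correct for scalars only. A minimal sketch of the distinction (hypothetical names; note the diff's own calls pass the C constant MPI_INT to the C++ bindings, which works where the implementation allows the implicit Datatype conversion):

    #include <mpi.h>

    void broadcastSketch(int nTotal) {
      // Scalar: MPI needs its address, so & is correct here.
      MPI::COMM_WORLD.Bcast(&nTotal, 1, MPI_INT, 0);

      // Array: the pointer already is the buffer address; &map would
      // broadcast the pointer variable itself instead of the data.
      int* map = new int[nTotal];
      MPI::COMM_WORLD.Bcast(map, nTotal, MPI_INT, 0);
      delete[] map;
    }

Since both branches now issue identical calls (Bcast has the same signature on root and non-root ranks), the if/else could arguably collapse into four unconditional broadcasts.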
@@ -281,17 +290,8 @@ int* mpiSimulation::divideLabor( void ){
       globalIndex[local_index] = i;
     }
   }
-
-
-
-
-  index = mpiPlug->myAtomStart;
-  //  for( i=0; i<mpiPlug->myNlocal; i++){
-  //    globalIndex[i] = index;
-  //    index++;
-  //  }
-
-  // return globalIndex;
+
+  return globalIndex;
 }
 
 
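With the dead, commented-out indexing code removed, the function finally returns the map it builds. Before this change the return statement was commented out, so a function declared int* ran off its end, which is undefined behavior for any caller that used the result. A hypothetical call site, matching the declaration (whether the caller is expected to delete[] the map is my assumption, not shown in the diff):

    int* globalIndex = mpiSim->divideLabor();
    // ... map local atom indices to global ones ...
    delete[] globalIndex;  // assumed caller-side cleanup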