@@ -85,7 +85,7 @@ int* mpiSimulation::divideLabor( void ){
 
   myRandom = new randomSPRNG( baseSeed );
 
-  a = (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
+  a = 3.0 * (double)mpiPlug->nMolGlobal / (double)mpiPlug->nAtomsGlobal;
 
   // Initialize things that we'll send out later:
   for (i = 0; i < mpiPlug->numberProcessors; i++ ) {
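Note on the changed constant: nAtomsGlobal / nMolGlobal is the average number of atoms per molecule, so the old value of a was 1 / (average atoms per molecule). The new factor of 3.0 acts as a penalty multiplier that makes the acceptance probability used later in this function die off three times faster per excess atom. A minimal standalone sketch of that relationship (the helper name and sample numbers are illustrative, not from the source):

    #include <cmath>
    #include <cstdio>

    // Sketch: the decay constant used by divideLabor(), rebuilt in
    // isolation. The 3.0 penalty factor comes from the diff above;
    // decayConstant() itself is a hypothetical helper.
    double decayConstant( int nMolGlobal, int nAtomsGlobal ) {
      // nMolGlobal / nAtomsGlobal == 1 / (average atoms per molecule)
      return 3.0 * (double)nMolGlobal / (double)nAtomsGlobal;
    }

    int main() {
      // e.g. 1000 three-atom molecules: a = 3.0 * 1000/3000 = 1.0
      double a = decayConstant( 1000, 3000 );
      // overloading a processor by one molecule (3 atoms):
      std::printf( "a = %g, Pacc(3) = %g\n", a, std::exp( -a * 3.0 ) );
      return 0;
    }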
@@ -136,9 +136,6 @@ int* mpiSimulation::divideLabor( void ){
       add_atoms = compStamps[MolComponentType[i]]->getNAtoms();
       new_atoms = old_atoms + add_atoms;
 
-      // If the processor already had too many atoms, just skip this
-      // processor and try again.
-
       // If we've been through this loop too many times, we need
       // to just give up and assign the molecule to this processor
       // and be done with it.
@@ -161,8 +158,6 @@ int* mpiSimulation::divideLabor( void ){
         done = 1;
         continue;
       }
-
-      if (old_atoms >= nTarget) continue;
 
       // If we can add this molecule to this processor without sending
       // it above nTarget, then go ahead and do it:
@@ -179,18 +174,18 @@ int* mpiSimulation::divideLabor( void ){
       }
 
 
-      // The only situation left is where old_atoms < nTarget, but
-      // new_atoms > nTarget. We want to accept this with some
-      // probability that dies off the farther we are from nTarget
+      // The only situation left is when new_atoms > nTarget. We
+      // want to accept this with some probability that dies off the
+      // farther we are from nTarget
 
       // roughly: x = new_atoms - nTarget
       // Pacc(x) = exp(- a * x)
-      // where a = 1 / (average atoms per molecule)
+      // where a = penalty / (average atoms per molecule)
 
       x = (double) (new_atoms - nTarget);
       y = myRandom->getRandom();
 
-      if (exp(- a * x) > y) {
+      if (y < exp(- a * x)) {
         MolToProcMap[i] = which_proc;
         AtomsPerProc[which_proc] += add_atoms;
         for (j = 0 ; j < add_atoms; j++ ) {
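The reversed comparison is behaviorally identical for a uniform variate but reads as the conventional Metropolis-style acceptance: draw y uniformly in [0,1) and accept when it falls below Pacc(x). Together with the removal of the old_atoms >= nTarget skip above, the whole per-candidate decision can be summarized as follows (a sketch assuming getRandom() returns a uniform double in [0,1); shouldAssign() is a hypothetical stand-in, not a function in the source):

    #include <cmath>

    // Sketch of the assignment decision after this change. Deterministic
    // accept while we stay at or under nTarget; otherwise accept with
    // probability exp(-a*x), which dies off with the overload x.
    bool shouldAssign( int old_atoms, int add_atoms, int nTarget,
                       double a, double y /* uniform in [0,1) */ ) {
      int new_atoms = old_atoms + add_atoms;
      if (new_atoms <= nTarget) return true;      // fits: always accept
      double x = (double)(new_atoms - nTarget);   // atoms over target
      return y < std::exp( -a * x );              // Pacc(x) = exp(-a*x)
    }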
@@ -255,8 +250,6 @@ int* mpiSimulation::divideLabor( void ){
     }
   }
 
-  std::cerr << "proc = " << mpiPlug->myNode << " atoms = " << natoms_local << "\n";
-
   MPI::COMM_WORLD.Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM);
   MPI::COMM_WORLD.Allreduce(&natoms_local,&natoms_global,1,MPI_INT,MPI_SUM);
 
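The two Allreduce calls sum the per-rank molecule and atom counts so every rank holds the totals for checking against nMolGlobal and nAtomsGlobal. A self-contained sketch of the same reduction, written against the C MPI bindings (the MPI::COMM_WORLD C++ bindings used here were later deprecated and removed from the MPI standard); the per-rank counts are placeholders:

    #include <mpi.h>
    #include <cstdio>

    int main( int argc, char** argv ) {
      MPI_Init( &argc, &argv );
      int nmol_local   = 10;   // placeholder per-rank counts
      int natoms_local = 30;
      int nmol_global, natoms_global;
      // Sum the local counts across all ranks; every rank gets the totals.
      MPI_Allreduce( &nmol_local,   &nmol_global,   1, MPI_INT, MPI_SUM,
                     MPI_COMM_WORLD );
      MPI_Allreduce( &natoms_local, &natoms_global, 1, MPI_INT, MPI_SUM,
                     MPI_COMM_WORLD );
      std::printf( "global: %d molecules, %d atoms\n",
                   nmol_global, natoms_global );
      MPI_Finalize();
      return 0;
    }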
@@ -303,7 +296,11 @@ void mpiSimulation::mpiRefresh( void ){
   int isError, i;
   int *globalIndex = new int[mpiPlug->myNlocal];
 
-  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();
+  // Fortran indexing needs to be increased by 1 in order to get the
+  // two languages to agree on which atom is which
+
+  for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex()+1;
+
 
   isError = 0;
   setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
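The +1 converts C's 0-based indices to Fortran's default 1-based indexing before they cross into setFsimParallel(); any index coming back from the Fortran side must be shifted down again. A two-line sketch of the convention (the helper names are hypothetical):

    // Sketch of the index convention at the C++/Fortran boundary.
    inline int toFortran( int cIndex )   { return cIndex + 1; } // 0-based -> 1-based
    inline int fromFortran( int fIndex ) { return fIndex - 1; } // 1-based -> 0-based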