# | Line 1 (old) | Line 1 (new) | |
---|---|---|---|
1 | #ifdef IS_MPI | |
2 | < | |
3 | < | #include <cstdlib> |
4 | < | #include <cstring> |
2 | > | #include <iostream> |
3 | > | #include <stdlib.h> |
4 | > | #include <string.h> |
5 | > | #include <math.h> |
6 | #include <mpi.h> | |
6 | – | #include <mpi++.h> |
7 | ||
8 | #include "mpiSimulation.hpp" | |
9 | #include "simError.h" | |
10 | #include "fortranWrappers.hpp" | |
11 | + | #include "randomSPRNG.hpp" |
12 | ||
12 | – | |
13 | – | |
14 | – | |
13 | mpiSimulation* mpiSim; | |
14 | ||
15 | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) | |
16 | { | |
17 | entryPlug = the_entryPlug; | |
18 | < | mpiPlug = new mpiSimData; |
18 | > | parallelData = new mpiSimData; |
19 | ||
20 | < | mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size(); |
21 | < | mpiPlug->myNode = worldRank; |
22 | < | |
20 | > | MPI_Comm_size(MPI_COMM_WORLD, &(parallelData->nProcessors) ); |
21 | > | parallelData->myNode = worldRank; |
22 | > | |
23 | > | MolToProcMap = new int[entryPlug->n_mol]; |
24 | > | MolComponentType = new int[entryPlug->n_mol]; |
25 | > | AtomToProcMap = new int[entryPlug->n_atoms]; |
26 | > | GroupToProcMap = new int[entryPlug->ngroup]; |
27 | > | |
28 | mpiSim = this; | |
29 | wrapMeSimParallel( this ); | |
30 | } | |
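
The constructor hunk above swaps the old C++ MPI bindings (`MPI::COMM_WORLD.Get_size()`) for the MPI C API and allocates the per-molecule, per-atom, and per-group processor maps. For reference, here is a minimal, self-contained sketch of that C-API query pattern; it is not taken from this file, and it obtains the rank via `MPI_Comm_rank` whereas the constructor reads it from the externally supplied `worldRank`.

```cpp
// Minimal sketch of the MPI C-API calls the new constructor relies on.
// Assumes nothing beyond an MPI installation; all names are illustrative.
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int nProcessors = 0;
  int myNode = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);  // number of ranks
  MPI_Comm_rank(MPI_COMM_WORLD, &myNode);       // this rank's id

  std::printf("rank %d of %d\n", myNode, nProcessors);

  MPI_Finalize();
  return 0;
}
```
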
# | Line 29 (old) | Line 32 (new) | mpiSimulation::~mpiSimulation(){ |
32 | ||
33 | mpiSimulation::~mpiSimulation(){ | |
34 | ||
35 | < | delete mpiPlug; |
35 | > | delete[] MolToProcMap; |
36 | > | delete[] MolComponentType; |
37 | > | delete[] AtomToProcMap; |
38 | > | delete[] GroupToProcMap; |
39 | > | |
40 | > | delete parallelData; |
41 | // perhaps we should let fortran know the party is over. | |
42 | ||
43 | } | |
44 | ||
45 | + | void mpiSimulation::divideLabor( ){ |
46 | ||
38 | – | |
39 | – | int* mpiSimulation::divideLabor( void ){ |
40 | – | |
41 | – | int* globalIndex; |
42 | – | |
47 | int nComponents; | |
48 | MoleculeStamp** compStamps; | |
49 | + | randomSPRNG *myRandom; |
50 | int* componentsNmol; | |
51 | + | int* AtomsPerProc; |
52 | + | int* GroupsPerProc; |
53 | ||
54 | double numerator; | |
55 | double denominator; | |
56 | double precast; | |
57 | + | double x, y, a; |
58 | + | int old_atoms, add_atoms, new_atoms; |
59 | + | int old_groups, add_groups, new_groups; |
60 | ||
61 | int nTarget; | |
62 | < | int molIndex, atomIndex, compIndex, compStart; |
62 | > | int molIndex, atomIndex, groupIndex; |
63 | int done; | |
64 | < | int nLocal, molLocal; |
65 | < | int i, index; |
66 | < | int smallDiff, bigDiff; |
64 | > | int i, j, loops, which_proc; |
65 | > | int nmol_global, nmol_local; |
66 | > | int ngroups_global, ngroups_local; |
67 | > | int natoms_global, natoms_local; |
68 | > | int ncutoff_groups, nAtomsInGroups; |
69 | > | int local_index; |
70 | > | int baseSeed = entryPlug->getSeed(); |
71 | > | CutoffGroupStamp* cg; |
72 | ||
58 | – | int testSum; |
59 | – | |
73 | nComponents = entryPlug->nComponents; | |
74 | compStamps = entryPlug->compStamps; | |
75 | componentsNmol = entryPlug->componentsNmol; | |
76 | + | AtomsPerProc = new int[parallelData->nProcessors]; |
77 | + | GroupsPerProc = new int[parallelData->nProcessors]; |
78 | + | |
79 | + | parallelData->nAtomsGlobal = entryPlug->n_atoms; |
80 | + | parallelData->nBondsGlobal = entryPlug->n_bonds; |
81 | + | parallelData->nBendsGlobal = entryPlug->n_bends; |
82 | + | parallelData->nTorsionsGlobal = entryPlug->n_torsions; |
83 | + | parallelData->nSRIGlobal = entryPlug->n_SRI; |
84 | + | parallelData->nGroupsGlobal = entryPlug->ngroup; |
85 | + | parallelData->nMolGlobal = entryPlug->n_mol; |
86 | ||
87 | < | mpiPlug->nAtomsGlobal = entryPlug->n_atoms; |
65 | < | mpiPlug->nBondsGlobal = entryPlug->n_bonds; |
66 | < | mpiPlug->nBendsGlobal = entryPlug->n_bends; |
67 | < | mpiPlug->nTorsionsGlobal = entryPlug->n_torsions; |
68 | < | mpiPlug->nSRIGlobal = entryPlug->n_SRI; |
69 | < | mpiPlug->nMolGlobal = entryPlug->n_mol; |
87 | > | myRandom = new randomSPRNG( baseSeed ); |
88 | ||
89 | < | numerator = (double) entryPlug->n_atoms; |
90 | < | denominator = (double) mpiPlug->numberProcessors; |
91 | < | precast = numerator / denominator; |
92 | < | nTarget = (int)( precast + 0.5 ); |
93 | < | |
94 | < | molIndex = 0; |
95 | < | atomIndex = 0; |
96 | < | compIndex = 0; |
97 | < | compStart = 0; |
98 | < | for( i=0; i<(mpiPlug->numberProcessors-1); i++){ |
89 | > | a = 3.0 * (double)parallelData->nMolGlobal / (double)parallelData->nAtomsGlobal; |
90 | > | |
91 | > | // Initialize things that we'll send out later: |
92 | > | for (i = 0; i < parallelData->nProcessors; i++ ) { |
93 | > | AtomsPerProc[i] = 0; |
94 | > | GroupsPerProc[i] = 0; |
95 | > | } |
96 | > | for (i = 0; i < parallelData->nMolGlobal; i++ ) { |
97 | > | // default to an error condition: |
98 | > | MolToProcMap[i] = -1; |
99 | > | MolComponentType[i] = -1; |
100 | > | } |
101 | > | for (i = 0; i < parallelData->nAtomsGlobal; i++ ) { |
102 | > | // default to an error condition: |
103 | > | AtomToProcMap[i] = -1; |
104 | > | } |
105 | > | for (i = 0; i < parallelData->nGroupsGlobal; i++ ) { |
106 | > | // default to an error condition: |
107 | > | GroupToProcMap[i] = -1; |
108 | > | } |
109 | ||
110 | < | done = 0; |
111 | < | nLocal = 0; |
112 | < | molLocal = 0; |
110 | > | if (parallelData->myNode == 0) { |
111 | > | numerator = (double) entryPlug->n_atoms; |
112 | > | denominator = (double) parallelData->nProcessors; |
113 | > | precast = numerator / denominator; |
114 | > | nTarget = (int)( precast + 0.5 ); |
115 | ||
116 | < | if( i == mpiPlug->myNode ){ |
117 | < | mpiPlug->myMolStart = molIndex; |
118 | < | mpiPlug->myAtomStart = atomIndex; |
116 | > | // Build the array of molecule component types first |
117 | > | molIndex = 0; |
118 | > | for (i=0; i < nComponents; i++) { |
119 | > | for (j=0; j < componentsNmol[i]; j++) { |
120 | > | MolComponentType[molIndex] = i; |
121 | > | molIndex++; |
122 | > | } |
123 | } | |
124 | + | |
125 | + | atomIndex = 0; |
126 | + | groupIndex = 0; |
127 | + | |
128 | + | for (i = 0; i < molIndex; i++ ) { |
129 | + | |
130 | + | done = 0; |
131 | + | loops = 0; |
132 | + | |
133 | + | while( !done ){ |
134 | + | loops++; |
135 | + | |
136 | + | // Pick a processor at random |
137 | + | |
138 | + | which_proc = (int) (myRandom->getRandom() * parallelData->nProcessors); |
139 | + | |
140 | + | // How many atoms does this processor have? |
141 | + | |
142 | + | old_atoms = AtomsPerProc[which_proc]; |
143 | + | add_atoms = compStamps[MolComponentType[i]]->getNAtoms(); |
144 | + | new_atoms = old_atoms + add_atoms; |
145 | + | |
146 | + | old_groups = GroupsPerProc[which_proc]; |
147 | + | ncutoff_groups = compStamps[MolComponentType[i]]->getNCutoffGroups(); |
148 | + | nAtomsInGroups = 0; |
149 | + | for (j = 0; j < ncutoff_groups; j++) { |
150 | + | cg = compStamps[MolComponentType[i]]->getCutoffGroup(j); |
151 | + | nAtomsInGroups += cg->getNMembers(); |
152 | + | } |
153 | + | add_groups = add_atoms - nAtomsInGroups + ncutoff_groups; |
154 | + | new_groups = old_groups + add_groups; |
155 | + | |
156 | + | // If we've been through this loop too many times, we need |
157 | + | // to just give up and assign the molecule to this processor |
158 | + | // and be done with it. |
159 | + | |
160 | + | if (loops > 100) { |
161 | + | sprintf( painCave.errMsg, |
162 | + | "I've tried 100 times to assign molecule %d to a " |
163 | + | " processor, but can't find a good spot.\n" |
164 | + | "I'm assigning it at random to processor %d.\n", |
165 | + | i, which_proc); |
166 | + | painCave.isFatal = 0; |
167 | + | simError(); |
168 | + | |
169 | + | MolToProcMap[i] = which_proc; |
170 | + | AtomsPerProc[which_proc] += add_atoms; |
171 | + | for (j = 0 ; j < add_atoms; j++ ) { |
172 | + | AtomToProcMap[atomIndex] = which_proc; |
173 | + | atomIndex++; |
174 | + | } |
175 | + | GroupsPerProc[which_proc] += add_groups; |
176 | + | for (j=0; j < add_groups; j++) { |
177 | + | GroupToProcMap[groupIndex] = which_proc; |
178 | + | groupIndex++; |
179 | + | } |
180 | + | done = 1; |
181 | + | continue; |
182 | + | } |
183 | ||
184 | < | while( !done ){ |
185 | < | |
186 | < | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
187 | < | compStart = molIndex; |
188 | < | compIndex++; |
189 | < | continue; |
190 | < | } |
184 | > | // If we can add this molecule to this processor without sending |
185 | > | // it above nTarget, then go ahead and do it: |
186 | > | |
187 | > | if (new_atoms <= nTarget) { |
188 | > | MolToProcMap[i] = which_proc; |
189 | > | AtomsPerProc[which_proc] += add_atoms; |
190 | > | for (j = 0 ; j < add_atoms; j++ ) { |
191 | > | AtomToProcMap[atomIndex] = which_proc; |
192 | > | atomIndex++; |
193 | > | } |
194 | > | GroupsPerProc[which_proc] += add_groups; |
195 | > | for (j=0; j < add_groups; j++) { |
196 | > | GroupToProcMap[groupIndex] = which_proc; |
197 | > | groupIndex++; |
198 | > | } |
199 | > | done = 1; |
200 | > | continue; |
201 | > | } |
202 | ||
203 | < | nLocal += compStamps[compIndex]->getNAtoms(); |
204 | < | atomIndex += compStamps[compIndex]->getNAtoms(); |
205 | < | molIndex++; |
206 | < | molLocal++; |
203 | > | |
204 | > | // The only situation left is when new_atoms > nTarget. We |
205 | > | // want to accept this with some probability that dies off the |
206 | > | // farther we are from nTarget |
207 | > | |
208 | > | // roughly: x = new_atoms - nTarget |
209 | > | // Pacc(x) = exp(- a * x) |
210 | > | // where a = penalty / (average atoms per molecule) |
211 | > | |
212 | > | x = (double) (new_atoms - nTarget); |
213 | > | y = myRandom->getRandom(); |
214 | ||
215 | < | if ( nLocal == nTarget ) done = 1; |
216 | < | |
217 | < | else if( nLocal < nTarget ){ |
218 | < | smallDiff = nTarget - nLocal; |
219 | < | } |
220 | < | else if( nLocal > nTarget ){ |
221 | < | bigDiff = nLocal - nTarget; |
222 | < | |
223 | < | if( bigDiff < smallDiff ) done = 1; |
224 | < | else{ |
225 | < | molIndex--; |
226 | < | molLocal--; |
227 | < | atomIndex -= compStamps[compIndex]->getNAtoms(); |
228 | < | nLocal -= compStamps[compIndex]->getNAtoms(); |
229 | < | done = 1; |
230 | < | } |
215 | > | if (y < exp(- a * x)) { |
216 | > | MolToProcMap[i] = which_proc; |
217 | > | AtomsPerProc[which_proc] += add_atoms; |
218 | > | for (j = 0 ; j < add_atoms; j++ ) { |
219 | > | AtomToProcMap[atomIndex] = which_proc; |
220 | > | atomIndex++; |
221 | > | } |
222 | > | GroupsPerProc[which_proc] += add_groups; |
223 | > | for (j=0; j < add_groups; j++) { |
224 | > | GroupToProcMap[groupIndex] = which_proc; |
225 | > | groupIndex++; |
226 | > | } |
227 | > | done = 1; |
228 | > | continue; |
229 | > | } else { |
230 | > | continue; |
231 | > | } |
232 | > | |
233 | } | |
234 | } | |
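
The placement loop above accepts an over-target assignment with a probability that decays exponentially in the overshoot: x = new_atoms - nTarget and Pacc(x) = exp(-a * x), where a = 3.0 * nMolGlobal / nAtomsGlobal, i.e. a penalty of 3 divided by the average number of atoms per molecule. The sketch below isolates that acceptance rule; `uniform01()` is a hypothetical stand-in for `randomSPRNG::getRandom()`.

```cpp
// Sketch of the exponential acceptance rule used when adding a molecule
// would push a processor past its target atom count. Illustrative only;
// uniform01() stands in for the SPRNG-based generator used in the file.
#include <cmath>
#include <cstdlib>

static double uniform01() {
  return std::rand() / (RAND_MAX + 1.0);  // uniform in [0, 1)
}

// Always accept at or below the target; above it, accept with
// probability exp(-a * (new_atoms - nTarget)).
bool acceptAssignment(int new_atoms, int nTarget, double a) {
  if (new_atoms <= nTarget) return true;
  double x = static_cast<double>(new_atoms - nTarget);
  return uniform01() < std::exp(-a * x);
}
```
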
235 | + | |
236 | + | |
237 | + | // Spray out this nonsense to all other processors: |
238 | + | |
239 | + | //std::cerr << "node 0 mol2proc = \n"; |
240 | + | //for (i = 0; i < parallelData->nMolGlobal; i++) |
241 | + | // std::cerr << i << "\t" << MolToProcMap[i] << "\n"; |
242 | + | |
243 | + | MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, |
244 | + | MPI_INT, 0, MPI_COMM_WORLD); |
245 | + | |
246 | + | MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal, |
247 | + | MPI_INT, 0, MPI_COMM_WORLD); |
248 | + | |
249 | + | MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal, |
250 | + | MPI_INT, 0, MPI_COMM_WORLD); |
251 | + | |
252 | + | MPI_Bcast(MolComponentType, parallelData->nMolGlobal, |
253 | + | MPI_INT, 0, MPI_COMM_WORLD); |
254 | + | |
255 | + | MPI_Bcast(AtomsPerProc, parallelData->nProcessors, |
256 | + | MPI_INT, 0, MPI_COMM_WORLD); |
257 | + | |
258 | + | MPI_Bcast(GroupsPerProc, parallelData->nProcessors, |
259 | + | MPI_INT, 0, MPI_COMM_WORLD); |
260 | + | } else { |
261 | + | |
262 | + | // Listen to your marching orders from processor 0: |
263 | ||
264 | < | if( i == mpiPlug->myNode ){ |
265 | < | mpiPlug->myMolEnd = (molIndex - 1); |
125 | < | mpiPlug->myAtomEnd = (atomIndex - 1); |
126 | < | mpiPlug->myNlocal = nLocal; |
127 | < | mpiPlug->myMol = molLocal; |
128 | < | } |
264 | > | MPI_Bcast(MolToProcMap, parallelData->nMolGlobal, |
265 | > | MPI_INT, 0, MPI_COMM_WORLD); |
266 | ||
267 | < | numerator = (double)( entryPlug->n_atoms - atomIndex ); |
268 | < | denominator = (double)( mpiPlug->numberProcessors - (i+1) ); |
269 | < | precast = numerator / denominator; |
270 | < | nTarget = (int)( precast + 0.5 ); |
267 | > | MPI_Bcast(AtomToProcMap, parallelData->nAtomsGlobal, |
268 | > | MPI_INT, 0, MPI_COMM_WORLD); |
269 | > | |
270 | > | MPI_Bcast(GroupToProcMap, parallelData->nGroupsGlobal, |
271 | > | MPI_INT, 0, MPI_COMM_WORLD); |
272 | > | |
273 | > | MPI_Bcast(MolComponentType, parallelData->nMolGlobal, |
274 | > | MPI_INT, 0, MPI_COMM_WORLD); |
275 | > | |
276 | > | MPI_Bcast(AtomsPerProc, parallelData->nProcessors, |
277 | > | MPI_INT, 0, MPI_COMM_WORLD); |
278 | > | |
279 | > | MPI_Bcast(GroupsPerProc, parallelData->nProcessors, |
280 | > | MPI_INT, 0, MPI_COMM_WORLD); |
281 | > | |
282 | > | |
283 | } | |
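
`MPI_Bcast` is a collective call that every rank, root and non-root alike, must make with the same arguments, which is why the six broadcasts appear verbatim in both branches above. A minimal sketch of how they could be written once, outside the if/else, is shown below; `broadcastIntArray()` is a hypothetical helper, not part of `mpiSimulation`.

```cpp
// Sketch: one call site for a collective broadcast serves both the sender
// (root) and the receivers, so the per-branch duplication is not required.
#include <mpi.h>

static void broadcastIntArray(int* data, int count, int root = 0) {
  MPI_Bcast(data, count, MPI_INT, root, MPI_COMM_WORLD);
}

// Possible usage inside divideLabor(), after node 0 has filled the maps:
//   broadcastIntArray(MolToProcMap,     parallelData->nMolGlobal);
//   broadcastIntArray(AtomToProcMap,    parallelData->nAtomsGlobal);
//   broadcastIntArray(GroupToProcMap,   parallelData->nGroupsGlobal);
//   broadcastIntArray(MolComponentType, parallelData->nMolGlobal);
//   broadcastIntArray(AtomsPerProc,     parallelData->nProcessors);
//   broadcastIntArray(GroupsPerProc,    parallelData->nProcessors);
```
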
135 | – | |
136 | – | if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){ |
137 | – | mpiPlug->myMolStart = molIndex; |
138 | – | mpiPlug->myAtomStart = atomIndex; |
284 | ||
285 | < | nLocal = 0; |
141 | < | molLocal = 0; |
142 | < | while( compIndex < nComponents ){ |
143 | < | |
144 | < | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
145 | < | compStart = molIndex; |
146 | < | compIndex++; |
147 | < | continue; |
148 | < | } |
285 | > | // Let's all check for sanity: |
286 | ||
287 | < | nLocal += compStamps[compIndex]->getNAtoms(); |
288 | < | atomIndex += compStamps[compIndex]->getNAtoms(); |
289 | < | molIndex++; |
290 | < | molLocal++; |
291 | < | } |
155 | < | |
156 | < | mpiPlug->myMolEnd = (molIndex - 1); |
157 | < | mpiPlug->myAtomEnd = (atomIndex - 1); |
158 | < | mpiPlug->myNlocal = nLocal; |
159 | < | mpiPlug->myMol = molLocal; |
287 | > | nmol_local = 0; |
288 | > | for (i = 0 ; i < parallelData->nMolGlobal; i++ ) { |
289 | > | if (MolToProcMap[i] == parallelData->myNode) { |
290 | > | nmol_local++; |
291 | > | } |
292 | } | |
293 | ||
294 | + | natoms_local = 0; |
295 | + | for (i = 0; i < parallelData->nAtomsGlobal; i++) { |
296 | + | if (AtomToProcMap[i] == parallelData->myNode) { |
297 | + | natoms_local++; |
298 | + | } |
299 | + | } |
300 | ||
301 | < | MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); |
302 | < | |
303 | < | if( mpiPlug->myNode == 0 ){ |
304 | < | if( testSum != entryPlug->n_atoms ){ |
167 | < | sprintf( painCave.errMsg, |
168 | < | "The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n", |
169 | < | testSum, entryPlug->n_atoms ); |
170 | < | painCave.isFatal = 1; |
171 | < | simError(); |
301 | > | ngroups_local = 0; |
302 | > | for (i = 0; i < parallelData->nGroupsGlobal; i++) { |
303 | > | if (GroupToProcMap[i] == parallelData->myNode) { |
304 | > | ngroups_local++; |
305 | } | |
306 | } | |
307 | ||
308 | + | MPI_Allreduce(&nmol_local,&nmol_global,1,MPI_INT,MPI_SUM, |
309 | + | MPI_COMM_WORLD); |
310 | + | |
311 | + | MPI_Allreduce(&natoms_local,&natoms_global,1,MPI_INT, |
312 | + | MPI_SUM, MPI_COMM_WORLD); |
313 | + | |
314 | + | MPI_Allreduce(&ngroups_local,&ngroups_global,1,MPI_INT, |
315 | + | MPI_SUM, MPI_COMM_WORLD); |
316 | + | |
317 | + | if( nmol_global != entryPlug->n_mol ){ |
318 | + | sprintf( painCave.errMsg, |
319 | + | "The sum of all nmol_local, %d, did not equal the " |
320 | + | "total number of molecules, %d.\n", |
321 | + | nmol_global, entryPlug->n_mol ); |
322 | + | painCave.isFatal = 1; |
323 | + | simError(); |
324 | + | } |
325 | + | |
326 | + | if( natoms_global != entryPlug->n_atoms ){ |
327 | + | sprintf( painCave.errMsg, |
328 | + | "The sum of all natoms_local, %d, did not equal the " |
329 | + | "total number of atoms, %d.\n", |
330 | + | natoms_global, entryPlug->n_atoms ); |
331 | + | painCave.isFatal = 1; |
332 | + | simError(); |
333 | + | } |
334 | + | |
335 | + | if( ngroups_global != entryPlug->ngroup ){ |
336 | + | sprintf( painCave.errMsg, |
337 | + | "The sum of all ngroups_local, %d, did not equal the " |
338 | + | "total number of cutoffGroups, %d.\n", |
339 | + | ngroups_global, entryPlug->ngroup ); |
340 | + | painCave.isFatal = 1; |
341 | + | simError(); |
342 | + | } |
343 | + | |
344 | sprintf( checkPointMsg, | |
345 | "Successfully divided the molecules among the processors.\n" ); | |
346 | MPIcheckPoint(); | |
347 | ||
348 | < | // lets create the identity array |
348 | > | parallelData->nMolLocal = nmol_local; |
349 | > | parallelData->nAtomsLocal = natoms_local; |
350 | > | parallelData->nGroupsLocal = ngroups_local; |
351 | ||
352 | < | globalIndex = new int[mpiPlug->myNlocal]; |
353 | < | index = mpiPlug->myAtomStart; |
354 | < | for( i=0; i<mpiPlug->myNlocal; i++){ |
355 | < | globalIndex[i] = index; |
356 | < | index++; |
352 | > | globalAtomIndex.resize(parallelData->nAtomsLocal); |
353 | > | globalToLocalAtom.resize(parallelData->nAtomsGlobal); |
354 | > | local_index = 0; |
355 | > | for (i = 0; i < parallelData->nAtomsGlobal; i++) { |
356 | > | if (AtomToProcMap[i] == parallelData->myNode) { |
357 | > | globalAtomIndex[local_index] = i; |
358 | > | |
359 | > | globalToLocalAtom[i] = local_index; |
360 | > | local_index++; |
361 | > | |
362 | > | } |
363 | > | else |
364 | > | globalToLocalAtom[i] = -1; |
365 | } | |
366 | ||
367 | < | return globalIndex; |
367 | > | globalGroupIndex.resize(parallelData->nGroupsLocal); |
368 | > | globalToLocalGroup.resize(parallelData->nGroupsGlobal); |
369 | > | local_index = 0; |
370 | > | for (i = 0; i < parallelData->nGroupsGlobal; i++) { |
371 | > | if (GroupToProcMap[i] == parallelData->myNode) { |
372 | > | globalGroupIndex[local_index] = i; |
373 | > | |
374 | > | globalToLocalGroup[i] = local_index; |
375 | > | local_index++; |
376 | > | |
377 | > | } |
378 | > | else |
379 | > | globalToLocalGroup[i] = -1; |
380 | > | } |
381 | > | |
382 | > | globalMolIndex.resize(parallelData->nMolLocal); |
383 | > | globalToLocalMol.resize(parallelData->nMolGlobal); |
384 | > | local_index = 0; |
385 | > | for (i = 0; i < parallelData->nMolGlobal; i++) { |
386 | > | if (MolToProcMap[i] == parallelData->myNode) { |
387 | > | globalMolIndex[local_index] = i; |
388 | > | globalToLocalMol[i] = local_index; |
389 | > | local_index++; |
390 | > | } |
391 | > | else |
392 | > | globalToLocalMol[i] = -1; |
393 | > | } |
394 | > | |
395 | } | |
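
`divideLabor()` ends by building paired lookup tables for atoms, groups, and molecules: a compact local-to-global list plus a full-length global-to-local array that holds -1 for objects owned by other ranks. The sketch below shows that pattern in isolation; the struct and function names are illustrative and are not part of the class.

```cpp
// Sketch of the two-way index mapping built at the end of divideLabor().
// ownerOf[i] plays the role of AtomToProcMap / GroupToProcMap / MolToProcMap.
#include <vector>

struct IndexMaps {
  std::vector<int> localToGlobal;  // size = number of locally owned objects
  std::vector<int> globalToLocal;  // size = global count, -1 if owned elsewhere
};

IndexMaps buildIndexMaps(const std::vector<int>& ownerOf, int myNode) {
  IndexMaps maps;
  maps.globalToLocal.assign(ownerOf.size(), -1);
  for (int i = 0; i < static_cast<int>(ownerOf.size()); i++) {
    if (ownerOf[i] == myNode) {
      maps.globalToLocal[i] = static_cast<int>(maps.localToGlobal.size());
      maps.localToGlobal.push_back(i);
    }
  }
  return maps;
}
```
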
396 | ||
397 | ||
398 | void mpiSimulation::mpiRefresh( void ){ | |
399 | ||
400 | int isError, i; | |
401 | < | int *globalIndex = new int[mpiPlug->myNlocal]; |
401 | > | int *localToGlobalAtomIndex = new int[parallelData->nAtomsLocal]; |
402 | > | int *localToGlobalGroupIndex = new int[parallelData->nGroupsLocal]; |
403 | ||
404 | < | for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex(); |
404 | > | // Fortran indexing needs to be increased by 1 in order to get the 2 |
405 | > | // languages to not barf |
406 | ||
407 | + | for(i = 0; i < parallelData->nAtomsLocal; i++) |
408 | + | localToGlobalAtomIndex[i] = globalAtomIndex[i] + 1; |
409 | + | |
410 | + | for(i = 0; i < parallelData->nGroupsLocal; i++) |
411 | + | localToGlobalGroupIndex[i] = globalGroupIndex[i] + 1; |
412 | ||
413 | isError = 0; | |
414 | < | setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError ); |
414 | > | |
415 | > | setFsimParallel( parallelData, |
416 | > | &(parallelData->nAtomsLocal), localToGlobalAtomIndex, |
417 | > | &(parallelData->nGroupsLocal), localToGlobalGroupIndex, |
418 | > | &isError ); |
419 | > | |
420 | if( isError ){ | |
421 | ||
422 | sprintf( painCave.errMsg, | |
# | Line 207 (old) | Line 425 (new) | void mpiSimulation::mpiRefresh( void ){ |
425 | simError(); | |
426 | } | |
427 | ||
428 | < | delete[] globalIndex; |
428 | > | delete[] localToGlobalGroupIndex; |
429 | > | delete[] localToGlobalAtomIndex; |
430 | ||
431 | + | |
432 | sprintf( checkPointMsg, | |
433 | " mpiRefresh successful.\n" ); | |
434 | MPIcheckPoint(); |
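
The comment in mpiRefresh() notes that the Fortran side expects 1-based indices, so the local-to-global arrays are shifted by one before being handed to `setFsimParallel`. A tiny sketch of that conversion, with an illustrative helper name, is below.

```cpp
// Sketch: shifting 0-based C++ indices to the 1-based indices expected by
// the Fortran side, as mpiRefresh() does before calling setFsimParallel.
#include <cstddef>
#include <vector>

std::vector<int> toFortranIndices(const std::vector<int>& zeroBased) {
  std::vector<int> oneBased(zeroBased.size());
  for (std::size_t i = 0; i < zeroBased.size(); i++)
    oneBased[i] = zeroBased[i] + 1;  // Fortran arrays start at 1
  return oneBased;
}
```
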
– | Removed lines
+ | Added lines
< | Changed lines (old version)
> | Changed lines (new version)