1 |
#ifdef IS_MPI |
2 |
|
3 |
#include <cstdlib> |
4 |
#include <cstring> |
5 |
#include <mpi.h> |
6 |
#include <mpi++.h> |
7 |
|
8 |
#include "mpiSimulation.hpp" |
9 |
#include "simError.h" |
10 |
#include "fortranWrappers.hpp" |
11 |
|
12 |
|
13 |
|
14 |
|
15 |
// Global handle to the (single) mpiSimulation instance; set in the
// mpiSimulation constructor so the fortran wrappers can reach it.
mpiSimulation* mpiSim;
16 |
|
17 |
// Builds the parallel bookkeeping for one node: records the communicator
// size and this node's rank, publishes the instance through the global
// mpiSim handle, and registers it with the fortran side.
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  // Stash the simulation description and allocate the parallel data struct.
  entryPlug = the_entryPlug;
  mpiPlug   = new mpiSimData;

  // How big is the world, and where do we sit in it?
  // NOTE(review): worldRank is presumably set during MPI_Init elsewhere — confirm.
  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode           = worldRank;

  // Publish the global handle first, then let fortran hook in.
  mpiSim = this;
  wrapMeSimParallel( this );
}
28 |
|
29 |
|
30 |
// Tears down the per-node parallel bookkeeping.  Everything else this
// class points at (entryPlug, the global mpiSim handle) is non-owning.
mpiSimulation::~mpiSimulation(){

  delete mpiPlug;

  // perhaps we should let fortran know the party is over.
}
36 |
|
37 |
|
38 |
|
39 |
int* mpiSimulation::divideLabor( void ){ |
40 |
|
41 |
int* globalIndex; |
42 |
|
43 |
int nComponents; |
44 |
MoleculeStamp** compStamps; |
45 |
int* componentsNmol; |
46 |
|
47 |
double numerator; |
48 |
double denominator; |
49 |
double precast; |
50 |
|
51 |
int nTarget; |
52 |
int molIndex, atomIndex, compIndex, compStart; |
53 |
int done; |
54 |
int nLocal, molLocal; |
55 |
int i, index; |
56 |
int smallDiff, bigDiff; |
57 |
|
58 |
int testSum; |
59 |
|
60 |
nComponents = entryPlug->nComponents; |
61 |
compStamps = entryPlug->compStamps; |
62 |
componentsNmol = entryPlug->componentsNmol; |
63 |
|
64 |
mpiPlug->nAtomsGlobal = entryPlug->n_atoms; |
65 |
mpiPlug->nBondsGlobal = entryPlug->n_bonds; |
66 |
mpiPlug->nBendsGlobal = entryPlug->n_bends; |
67 |
mpiPlug->nTorsionsGlobal = entryPlug->n_torsions; |
68 |
mpiPlug->nSRIGlobal = entryPlug->n_SRI; |
69 |
mpiPlug->nMolGlobal = entryPlug->n_mol; |
70 |
|
71 |
|
72 |
|
73 |
|
74 |
|
75 |
|
76 |
|
77 |
|
78 |
|
79 |
|
80 |
|
81 |
|
82 |
numerator = (double) entryPlug->n_atoms; |
83 |
denominator = (double) mpiPlug->numberProcessors; |
84 |
precast = numerator / denominator; |
85 |
nTarget = (int)( precast + 0.5 ); |
86 |
|
87 |
molIndex = 0; |
88 |
atomIndex = 0; |
89 |
compIndex = 0; |
90 |
compStart = 0; |
91 |
for( i=0; i<(mpiPlug->numberProcessors-1); i++){ |
92 |
|
93 |
done = 0; |
94 |
nLocal = 0; |
95 |
molLocal = 0; |
96 |
|
97 |
if( i == mpiPlug->myNode ){ |
98 |
mpiPlug->myMolStart = molIndex; |
99 |
mpiPlug->myAtomStart = atomIndex; |
100 |
} |
101 |
|
102 |
while( !done ){ |
103 |
|
104 |
if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
105 |
compStart = molIndex; |
106 |
compIndex++; |
107 |
continue; |
108 |
} |
109 |
|
110 |
nLocal += compStamps[compIndex]->getNAtoms(); |
111 |
atomIndex += compStamps[compIndex]->getNAtoms(); |
112 |
molIndex++; |
113 |
molLocal++; |
114 |
|
115 |
if ( nLocal == nTarget ) done = 1; |
116 |
|
117 |
else if( nLocal < nTarget ){ |
118 |
smallDiff = nTarget - nLocal; |
119 |
} |
120 |
else if( nLocal > nTarget ){ |
121 |
bigDiff = nLocal - nTarget; |
122 |
|
123 |
if( bigDiff < smallDiff ) done = 1; |
124 |
else{ |
125 |
molIndex--; |
126 |
molLocal--; |
127 |
atomIndex -= compStamps[compIndex]->getNAtoms(); |
128 |
nLocal -= compStamps[compIndex]->getNAtoms(); |
129 |
done = 1; |
130 |
} |
131 |
} |
132 |
} |
133 |
|
134 |
if( i == mpiPlug->myNode ){ |
135 |
mpiPlug->myMolEnd = (molIndex - 1); |
136 |
mpiPlug->myAtomEnd = (atomIndex - 1); |
137 |
mpiPlug->myNlocal = nLocal; |
138 |
mpiPlug->myMol = molLocal; |
139 |
} |
140 |
|
141 |
numerator = (double)( entryPlug->n_atoms - atomIndex ); |
142 |
denominator = (double)( mpiPlug->numberProcessors - (i+1) ); |
143 |
precast = numerator / denominator; |
144 |
nTarget = (int)( precast + 0.5 ); |
145 |
} |
146 |
|
147 |
if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){ |
148 |
mpiPlug->myMolStart = molIndex; |
149 |
mpiPlug->myAtomStart = atomIndex; |
150 |
|
151 |
nLocal = 0; |
152 |
molLocal = 0; |
153 |
while( compIndex < nComponents ){ |
154 |
|
155 |
if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
156 |
compStart = molIndex; |
157 |
compIndex++; |
158 |
continue; |
159 |
} |
160 |
|
161 |
nLocal += compStamps[compIndex]->getNAtoms(); |
162 |
atomIndex += compStamps[compIndex]->getNAtoms(); |
163 |
molIndex++; |
164 |
molLocal++; |
165 |
} |
166 |
|
167 |
mpiPlug->myMolEnd = (molIndex - 1); |
168 |
mpiPlug->myAtomEnd = (atomIndex - 1); |
169 |
mpiPlug->myNlocal = nLocal; |
170 |
mpiPlug->myMol = molLocal; |
171 |
} |
172 |
|
173 |
|
174 |
MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); |
175 |
|
176 |
if( mpiPlug->myNode == 0 ){ |
177 |
if( testSum != entryPlug->n_atoms ){ |
178 |
sprintf( painCave.errMsg, |
179 |
"The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n", |
180 |
testSum, entryPlug->n_atoms ); |
181 |
painCave.isFatal = 1; |
182 |
simError(); |
183 |
} |
184 |
} |
185 |
|
186 |
sprintf( checkPointMsg, |
187 |
"Successfully divided the molecules among the processors.\n" ); |
188 |
MPIcheckPoint(); |
189 |
|
190 |
// lets create the identity array |
191 |
|
192 |
globalIndex = new int[mpiPlug->myNlocal]; |
193 |
index = mpiPlug->myAtomStart; |
194 |
for( i=0; i<mpiPlug->myNlocal; i++){ |
195 |
globalIndex[i] = index; |
196 |
index++; |
197 |
} |
198 |
|
199 |
return globalIndex; |
200 |
} |
201 |
|
202 |
|
203 |
void mpiSimulation::mpiRefresh( void ){ |
204 |
|
205 |
int isError, i; |
206 |
int *globalIndex = new int[mpiPlug->myNlocal]; |
207 |
|
208 |
for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex(); |
209 |
|
210 |
|
211 |
isError = 0; |
212 |
setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError ); |
213 |
if( isError ){ |
214 |
|
215 |
sprintf( painCave.errMsg, |
216 |
"mpiRefresh errror: fortran didn't like something we gave it.\n" ); |
217 |
painCave.isFatal = 1; |
218 |
simError(); |
219 |
} |
220 |
|
221 |
delete[] globalIndex; |
222 |
|
223 |
sprintf( checkPointMsg, |
224 |
" mpiRefresh successful.\n" ); |
225 |
MPIcheckPoint(); |
226 |
} |
227 |
|
228 |
|
229 |
#endif // is_mpi |