#include <cstdlib>
#include <cstdio> // for sprintf
#include <cstring>
#include <mpi.h>
#include <mpi++.h>

#include "mpiSimulation.hpp"
#include "simError.h"

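// wrapsimparallelmod_ lives on the fortran side; the trailing underscore
// matches the symbol name most fortran compilers emit for this routine,
// though the exact mangling is compiler dependent.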
extern "C"{
  void wrapsimparallelmod_( void (*wrapFunction)(void (*fSub)( mpiSimData*,
                                                                int*, int*,
                                                                int*)));
}

void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*));

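// global pointer to the one mpiSimulation instance; the extern "C"
// callback below has no object context, so it reaches the class through
// this pointer.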
mpiSimulation* mpiSim;

mpiSimulation::mpiSimulation(SimInfo* the_entryPlug)
{
  entryPlug = the_entryPlug;
  mpiPlug = new mpiSimData;

  mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size();
  mpiPlug->myNode = worldRank;

  mpiSim = this;
  wrapMe();
}


mpiSimulation::~mpiSimulation(){

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}

void mpiSimulation::wrapMe(){

  wrapsimparallelmod_( wrapSimParallel );
}


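// divideLabor: deal the molecules out to the processors so that each node
// ends up with roughly n_atoms / numberProcessors atoms.  Every node runs
// the same deterministic walk over the component list, so each one can
// work out its own start/end indices without any communication.  Returns
// a newly allocated local->global atom index map; the caller owns it.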
int* mpiSimulation::divideLabor( void ){

  int* globalIndex;

  int nComponents;
  MoleculeStamp** compStamps;
  int* componentsNmol;

  double numerator;
  double denominator;
  double precast;

  int nTarget;
  int molIndex, atomIndex, compIndex, compStart;
  int done;
  int nLocal, molLocal;
  int i, index;
  int smallDiff, bigDiff;

  int testSum;

  nComponents = entryPlug->nComponents;
  compStamps = entryPlug->compStamps;
  componentsNmol = entryPlug->componentsNmol;

  mpiPlug->nAtomsGlobal = entryPlug->n_atoms;
  mpiPlug->nBondsGlobal = entryPlug->n_bonds;
  mpiPlug->nBendsGlobal = entryPlug->n_bends;
  mpiPlug->nTorsionsGlobal = entryPlug->n_torsions;
  mpiPlug->nSRIGlobal = entryPlug->n_SRI;
  mpiPlug->nMolGlobal = entryPlug->n_mol;

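  // first guess at the per-node load: n_atoms / nProcessors, rounded to
  // the nearest integer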
  numerator = (double) entryPlug->n_atoms;
  denominator = (double) mpiPlug->numberProcessors;
  precast = numerator / denominator;
  nTarget = (int)( precast + 0.5 );

  molIndex = 0;
  atomIndex = 0;
  compIndex = 0;
  compStart = 0;
  for( i=0; i<(mpiPlug->numberProcessors-1); i++){

    done = 0;
    nLocal = 0;
    molLocal = 0;
    smallDiff = nTarget; // worst-case undershoot, so the overshoot test
                         // below never reads smallDiff uninitialized

    if( i == mpiPlug->myNode ){
      mpiPlug->myMolStart = molIndex;
      mpiPlug->myAtomStart = atomIndex;
    }

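    // greedily claim molecules until this node is as close to nTarget
    // atoms as molecule boundaries allow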
    while( !done ){

      // move on to the next component once this one is used up
      if( (molIndex-compStart) >= componentsNmol[compIndex] ){
        compStart = molIndex;
        compIndex++;
        continue;
      }

      nLocal += compStamps[compIndex]->getNAtoms();
      atomIndex += compStamps[compIndex]->getNAtoms();
      molIndex++;
      molLocal++;

      if ( nLocal == nTarget ) done = 1;

      else if( nLocal < nTarget ){
        smallDiff = nTarget - nLocal;
      }
      else if( nLocal > nTarget ){
        bigDiff = nLocal - nTarget;

        // keep the extra molecule only if overshooting the target beats
        // the best undershoot seen so far
        if( bigDiff < smallDiff ) done = 1;
        else{
          molIndex--;
          molLocal--;
          atomIndex -= compStamps[compIndex]->getNAtoms();
          nLocal -= compStamps[compIndex]->getNAtoms();
          done = 1;
        }
      }
    }

    if( i == mpiPlug->myNode ){
      mpiPlug->myMolEnd = (molIndex - 1);
      mpiPlug->myAtomEnd = (atomIndex - 1);
      mpiPlug->myNlocal = nLocal;
      mpiPlug->myMol = molLocal;
    }

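    // retarget using the atoms actually left over, so rounding error
    // doesn't pile up on the last node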
    numerator = (double)( entryPlug->n_atoms - atomIndex );
    denominator = (double)( mpiPlug->numberProcessors - (i+1) );
    precast = numerator / denominator;
    nTarget = (int)( precast + 0.5 );
  }
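  // the last node simply takes every molecule that is still unassigned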
  if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){
    mpiPlug->myMolStart = molIndex;
    mpiPlug->myAtomStart = atomIndex;

    nLocal = 0;
    molLocal = 0;
    while( compIndex < nComponents ){

      if( (molIndex-compStart) >= componentsNmol[compIndex] ){
        compStart = molIndex;
        compIndex++;
        continue;
      }

      nLocal += compStamps[compIndex]->getNAtoms();
      atomIndex += compStamps[compIndex]->getNAtoms();
      molIndex++;
      molLocal++;
    }

    mpiPlug->myMolEnd = (molIndex - 1);
    mpiPlug->myAtomEnd = (atomIndex - 1);
    mpiPlug->myNlocal = nLocal;
    mpiPlug->myMol = molLocal;
  }
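  // sanity check: the local atom counts must sum back to the global total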
  MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD );

  if( mpiPlug->myNode == 0 ){
    if( testSum != entryPlug->n_atoms ){
      sprintf( painCave.errMsg,
               "The sum of all nLocals, %d, did not equal the total number of atoms, %d.\n",
               testSum, entryPlug->n_atoms );
      painCave.isFatal = 1;
      simError();
    }
  }

  sprintf( checkPointMsg,
           "Successfully divided the molecules among the processors.\n" );
  MPIcheckPoint();

  // let's create the identity array: local atom i is global atom
  // myAtomStart + i
  globalIndex = new int[mpiPlug->myNlocal];
  index = mpiPlug->myAtomStart;
  for( i=0; i<mpiPlug->myNlocal; i++){
    globalIndex[i] = index;
    index++;
  }

  return globalIndex;
}
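// Handed to the fortran module by wrapMe(); fortran calls back with a
// pointer to its setup routine, which we stash in the mpiSim singleton.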
void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*)){

  mpiSim->setInternal( fSub );
}
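// mpiRefresh: rebuild the local->global index map from the current atom
// list and re-register it with the fortran side (presumably needed
// whenever the local atom list has changed).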
void mpiSimulation::mpiRefresh( void ){

  int isError, i;
  int *globalIndex = new int[mpiPlug->myNlocal];

  for(i=0; i<mpiPlug->myNlocal; i++)
    globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex();

  isError = 0;
  setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError );
  if( isError ){

    sprintf( painCave.errMsg,
             "mpiRefresh error: fortran didn't like something we gave it.\n" );
    painCave.isFatal = 1;
    simError();
  }

  delete[] globalIndex;

  sprintf( checkPointMsg,
           " mpiRefresh successful.\n" );
  MPIcheckPoint();
}