1 |
< |
#include <mpi++.h> |
2 |
< |
#include <mpiSimulation.hpp> |
1 |
> |
#include <cstdlib> |
2 |
> |
#include <cstring> |
3 |
> |
#include <mpi.h> |
4 |
|
|
5 |
< |
mpiSimulation::mpiSimulation() |
5 |
> |
#include "mpiSimulation.hpp" |
6 |
> |
#include "simError.h" |
7 |
> |
|
8 |
> |
extern "C"{ |
9 |
> |
void wrapsimparallelmod_( void (*wrapFunction)(void (*fSub)( mpiSimData*, |
10 |
> |
int*, int*, |
11 |
> |
int*))); |
12 |
> |
} |
13 |
> |
|
14 |
> |
void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*)); |
15 |
> |
|
16 |
> |
|
17 |
> |
mpiSimulation* mpiSim; |
18 |
> |
|
19 |
> |
mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) |
20 |
|
{ |
21 |
< |
int mpi_error; |
21 |
> |
entryPlug = the_entryPlug; |
22 |
> |
mpiPlug = new mpiSimData; |
23 |
> |
|
24 |
> |
mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size(); |
25 |
> |
mpiPlug->myNode = worldRank; |
26 |
> |
|
27 |
> |
mpiSim = this; |
28 |
> |
wrapMe(); |
29 |
|
|
30 |
< |
MPI::Init(); |
30 |
> |
} |
31 |
|
|
32 |
< |
numberProcessors = MPI::Comm::Get_size(); |
33 |
< |
myNode = MPI::Comm::Get_rank(); |
34 |
< |
MPI::Get_processor_name(processorName,&processorNameLen); |
32 |
> |
|
// Destructor: releases the mpiSimData struct allocated in the
// constructor.  NOTE(review): the fortran side is not told that the
// simulation object is going away — see the comment below.
mpiSimulation::~mpiSimulation(){

  delete mpiPlug;
  // perhaps we should let fortran know the party is over.

}
39 |
|
|
40 |
< |
mpiSimulation::mpiInitSimulation(SimInfo* entry_plug) |
16 |
< |
{ |
// Hands our C trampoline (wrapSimParallel) to the fortran module via
// wrapsimparallelmod_, so fortran can later pass its setup routine
// back to this object.
void mpiSimulation::wrapMe(){

  wrapsimparallelmod_( wrapSimParallel );
}
44 |
|
|
45 |
|
|
46 |
|
|
47 |
< |
myMolStart = nint(float(node)/numberProcessors*entry_plug->n_mol); |
48 |
< |
myMolEnd = nint(float(node + 1)/numberProcessors*entry_plug->n_mol;); |
49 |
< |
nMolLocal = myMolEnd - myMolStart + 1 |
47 |
> |
int* mpiSimulation::divideLabor( void ){ |
48 |
> |
|
49 |
> |
int* globalIndex; |
50 |
> |
|
51 |
> |
int nComponents; |
52 |
> |
MoleculeStamp** compStamps; |
53 |
> |
int* componentsNmol; |
54 |
> |
|
55 |
> |
double numerator; |
56 |
> |
double denominator; |
57 |
> |
double precast; |
58 |
> |
|
59 |
> |
int nTarget; |
60 |
> |
int molIndex, atomIndex, compIndex, compStart; |
61 |
> |
int done; |
62 |
> |
int nLocal, molLocal; |
63 |
> |
int i, index; |
64 |
> |
int smallDiff, bigDiff; |
65 |
> |
|
66 |
> |
int testSum; |
67 |
> |
|
68 |
> |
nComponents = entryPlug->nComponents; |
69 |
> |
compStamps = entryPlug->compStamps; |
70 |
> |
componentsNmol = entryPlug->componentsNmol; |
71 |
> |
|
72 |
> |
mpiPlug->nAtomsGlobal = entryPlug->n_atoms; |
73 |
> |
mpiPlug->nBondsGlobal = entryPlug->n_bonds; |
74 |
> |
mpiPlug->nBendsGlobal = entryPlug->n_bends; |
75 |
> |
mpiPlug->nTorsionsGlobal = entryPlug->n_torsions; |
76 |
> |
mpiPlug->nSRIGlobal = entryPlug->n_SRI; |
77 |
> |
mpiPlug->nMolGlobal = entryPlug->n_mol; |
78 |
> |
|
79 |
> |
numerator = (double) entryPlug->n_atoms; |
80 |
> |
denominator = (double) mpiPlug->numberProcessors; |
81 |
> |
precast = numerator / denominator; |
82 |
> |
nTarget = (int)( precast + 0.5 ); |
83 |
> |
|
84 |
> |
molIndex = 0; |
85 |
> |
atomIndex = 0; |
86 |
> |
compIndex = 0; |
87 |
> |
compStart = 0; |
88 |
> |
for( i=0; i<(mpiPlug->numberProcessors-1); i++){ |
89 |
> |
|
90 |
> |
done = 0; |
91 |
> |
nLocal = 0; |
92 |
> |
molLocal = 0; |
93 |
> |
|
94 |
> |
if( i == mpiPlug->myNode ){ |
95 |
> |
mpiPlug->myMolStart = molIndex; |
96 |
> |
mpiPlug->myAtomStart = atomIndex; |
97 |
> |
} |
98 |
> |
|
99 |
> |
while( !done ){ |
100 |
> |
|
101 |
> |
if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
102 |
> |
compStart = molIndex; |
103 |
> |
compIndex++; |
104 |
> |
continue; |
105 |
> |
} |
106 |
> |
|
107 |
> |
nLocal += compStamps[compIndex]->getNAtoms(); |
108 |
> |
atomIndex += compStamps[compIndex]->getNAtoms(); |
109 |
> |
molIndex++; |
110 |
> |
molLocal++; |
111 |
> |
|
112 |
> |
if ( nLocal == nTarget ) done = 1; |
113 |
> |
|
114 |
> |
else if( nLocal < nTarget ){ |
115 |
> |
smallDiff = nTarget - nLocal; |
116 |
> |
} |
117 |
> |
else if( nLocal > nTarget ){ |
118 |
> |
bigDiff = nLocal - nTarget; |
119 |
> |
|
120 |
> |
if( bigDiff < smallDiff ) done = 1; |
121 |
> |
else{ |
122 |
> |
molIndex--; |
123 |
> |
molLocal--; |
124 |
> |
atomIndex -= compStamps[compIndex]->getNAtoms(); |
125 |
> |
nLocal -= compStamps[compIndex]->getNAtoms(); |
126 |
> |
done = 1; |
127 |
> |
} |
128 |
> |
} |
129 |
> |
} |
130 |
> |
|
131 |
> |
if( i == mpiPlug->myNode ){ |
132 |
> |
mpiPlug->myMolEnd = (molIndex - 1); |
133 |
> |
mpiPlug->myAtomEnd = (atomIndex - 1); |
134 |
> |
mpiPlug->myNlocal = nLocal; |
135 |
> |
mpiPlug->myMol = molLocal; |
136 |
> |
} |
137 |
> |
|
138 |
> |
numerator = (double)( entryPlug->n_atoms - atomIndex ); |
139 |
> |
denominator = (double)( mpiPlug->numberProcessors - (i+1) ); |
140 |
> |
precast = numerator / denominator; |
141 |
> |
nTarget = (int)( precast + 0.5 ); |
142 |
> |
} |
143 |
> |
|
144 |
> |
if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){ |
145 |
> |
mpiPlug->myMolStart = molIndex; |
146 |
> |
mpiPlug->myAtomStart = atomIndex; |
147 |
> |
|
148 |
> |
nLocal = 0; |
149 |
> |
molLocal = 0; |
150 |
> |
while( compIndex < nComponents ){ |
151 |
> |
|
152 |
> |
if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
153 |
> |
compStart = molIndex; |
154 |
> |
compIndex++; |
155 |
> |
continue; |
156 |
> |
} |
157 |
> |
|
158 |
> |
nLocal += compStamps[compIndex]->getNAtoms(); |
159 |
> |
atomIndex += compStamps[compIndex]->getNAtoms(); |
160 |
> |
molIndex++; |
161 |
> |
molLocal++; |
162 |
> |
} |
163 |
> |
|
164 |
> |
mpiPlug->myMolEnd = (molIndex - 1); |
165 |
> |
mpiPlug->myAtomEnd = (atomIndex - 1); |
166 |
> |
mpiPlug->myNlocal = nLocal; |
167 |
> |
mpiPlug->myMol = molLocal; |
168 |
> |
} |
169 |
> |
|
170 |
> |
|
171 |
> |
MPI_Allreduce( &nLocal, &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); |
172 |
> |
|
173 |
> |
if( mpiPlug->myNode == 0 ){ |
174 |
> |
if( testSum != entryPlug->n_atoms ){ |
175 |
> |
sprintf( painCave.errMsg, |
176 |
> |
"The summ of all nLocals, %d, did not equal the total number of atoms, %d.\n", |
177 |
> |
testSum, entryPlug->n_atoms ); |
178 |
> |
painCave.isFatal = 1; |
179 |
> |
simError(); |
180 |
> |
} |
181 |
> |
} |
182 |
> |
|
183 |
> |
sprintf( checkPointMsg, |
184 |
> |
"Successfully divided the molecules among the processors.\n" ); |
185 |
> |
MPIcheckPoint(); |
186 |
> |
|
187 |
> |
// lets create the identity array |
188 |
> |
|
189 |
> |
globalIndex = new int[mpiPlug->myNlocal]; |
190 |
> |
index = mpiPlug->myAtomStart; |
191 |
> |
for( i=0; i<mpiPlug->myNlocal; i++){ |
192 |
> |
globalIndex[i] = index; |
193 |
> |
index++; |
194 |
> |
} |
195 |
> |
|
196 |
> |
return globalIndex; |
197 |
|
} |
198 |
+ |
|
199 |
+ |
|
// Trampoline handed to fortran by mpiSimulation::wrapMe: receives the
// fortran setup routine pointer and stores it in the global mpiSim
// instance via setInternal.
void wrapSimParallel(void (*fSub)(mpiSimData*, int*, int*, int*)){

  mpiSim->setInternal( fSub );
}
204 |
+ |
|
205 |
+ |
|
206 |
+ |
void mpiSimulation::mpiRefresh( void ){ |
207 |
+ |
|
208 |
+ |
int isError, i; |
209 |
+ |
int *globalIndex = new int[mpiPlug->myNlocal]; |
210 |
+ |
|
211 |
+ |
for(i=0; i<mpiPlug->myNlocal; i++) globalIndex[i] = entryPlug->atoms[i]->getGlobalIndex(); |
212 |
+ |
|
213 |
+ |
|
214 |
+ |
isError = 0; |
215 |
+ |
setFsimParallel( mpiPlug, &(entryPlug->n_atoms), globalIndex, &isError ); |
216 |
+ |
if( isError ){ |
217 |
+ |
|
218 |
+ |
sprintf( painCave.errMsg, |
219 |
+ |
"mpiRefresh errror: fortran didn't like something we gave it.\n" ); |
220 |
+ |
painCave.isFatal = 1; |
221 |
+ |
simError(); |
222 |
+ |
} |
223 |
+ |
|
224 |
+ |
delete[] globalIndex; |
225 |
+ |
|
226 |
+ |
sprintf( checkPointMsg, |
227 |
+ |
" mpiRefresh successful.\n" ); |
228 |
+ |
MPIcheckPoint(); |
229 |
+ |
} |
230 |
+ |
|