# | Line 1 | Line 1 |
---|---|---|
1 | + | #include <cstdlib> |
2 | + | #include <cstring> |
3 | #include <mpi.h> | |
2 | – | #include <mpiSimulation.hpp> |
4 | ||
5 | < | mpiSimulation::mpiSimulation(void) |
5 | > | #include "mpiSimulation.hpp" |
6 | > | #include "simError.h" |
7 | > | |
8 | > | extern "C"{ |
9 | > | void wrapsimparallelmod_( void (*wrapFunction)(void (*fSub)( mpiSimData*, |
10 | > | int*, int*, |
11 | > | int*))); |
12 | > | } |
13 | > | |
14 | > | void wrapSimParallel( void (*fSub)(mpiSimData*, int*, int*, int*) ); |
15 | > | |
16 | > | |
17 | > | mpiSimulation* mpiSim; |
18 | > | |
19 | > | mpiSimulation::mpiSimulation(SimInfo* the_entryPlug) |
20 | { | |
21 | + | entryPlug = the_entryPlug; |
22 | + | mpiPlug = new mpiSimData; |
23 | + | |
24 | + | mpiPlug->numberProcessors = MPI::COMM_WORLD.Get_size(); |
25 | + | mpiPlug->myNode = worldRank; |
26 | + | |
27 | + | mpiSim = this; |
28 | + | wrapMe(); |
29 | ||
30 | < | MPI::Init(); |
30 | > | } |
31 | ||
32 | < | numberProcessors = MPI::COMM_WORLD.Get_size(); |
33 | < | myNode = MPI::COMM_WORLD.Get_rank(); |
34 | < | MPI::Get_processor_name(processorName,processorNameLen); |
32 | > | |
33 | > | mpiSimulation::~mpiSimulation(){ |
34 | > | |
35 | > | delete mpiPlug; |
36 | > | // perhaps we should let fortran know the party is over. |
37 | > | |
38 | } | |
39 | ||
40 | + | void mpiSimulation::wrapMe(){ |
41 | ||
42 | + | wrapsimparallelmod_( wrapSimParallel ); |
43 | + | } |
44 | + | |
45 | + | |
46 | + | |
47 | + | void mpiSimulation::divideLabor( void ){ |
48 | + | |
49 | + | int nComponents; |
50 | + | MoleculeStamp** compStamps; |
51 | + | int* componentsNmol; |
52 | + | |
53 | + | double numerator; |
54 | + | double denominator; |
55 | + | double precast; |
56 | + | |
57 | + | int nTarget; |
58 | + | int molIndex, atomIndex, compIndex, compStart; |
59 | + | int done; |
60 | + | int nLocal, molLocal; |
61 | + | int i; |
62 | + | int smallDiff, bigDiff; |
63 | + | |
64 | + | int testSum; |
65 | + | |
66 | + | nComponents = entryPlug->nComponents; |
67 | + | compStamps = entryPlug->compStamps; |
68 | + | componentsNmol = entryPlug->componentsNmol; |
69 | + | |
70 | + | mpiPlug->nAtomsGlobal = entryPlug->n_atoms; |
71 | + | mpiPlug->nBondsGlobal = entryPlug->n_bonds; |
72 | + | mpiPlug->nBendsGlobal = entryPlug->n_bends; |
73 | + | mpiPlug->nTorsionsGlobal = entryPlug->n_torsions; |
74 | + | mpiPlug->nSRIGlobal = entryPlug->n_SRI; |
75 | + | mpiPlug->nMolGlobal = entryPlug->n_nmol; |
76 | + | |
77 | + | numerator = (double) entryPlug->n_atoms; |
78 | + | denominator = (double) mpiPlug->numberProcessors; |
79 | + | precast = numerator / denominator; |
80 | + | nTarget = (int)( precast + 0.5 ); |
81 | + | |
82 | + | molIndex = 0; |
83 | + | atomIndex = 0; |
84 | + | compIndex = 0; |
85 | + | compStart = 0; |
86 | + | for( i=0; i<(mpiPlug->numberProcessors-1); i++){ |
87 | + | |
88 | + | done = 0; |
89 | + | nLocal = 0; |
90 | + | molLocal = 0; |
91 | + | |
92 | + | if( i == mpiPlug->myNode ){ |
93 | + | mpiPlug->myMolStart = molIndex; |
94 | + | mpiPlug->myAtomStart = atomIndex; |
95 | + | } |
96 | + | |
97 | + | while( !done ){ |
98 | + | |
99 | + | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
100 | + | compStart = molIndex; |
101 | + | compIndex++; |
102 | + | continue; |
103 | + | } |
104 | + | |
105 | + | nLocal += compStamps[compIndex]->getNAtoms(); |
106 | + | atomIndex += compStamps[compIndex]->getNAtoms(); |
107 | + | molIndex++; |
108 | + | molLocal++; |
109 | + | |
110 | + | if ( nLocal == nTarget ) done = 1; |
111 | + | |
112 | + | else if( nLocal < nTarget ){ |
113 | + | smallDiff = nTarget - nLocal; |
114 | + | } |
115 | + | else if( nLocal > nTarget ){ |
116 | + | bigDiff = nLocal - nTarget; |
117 | + | |
118 | + | if( bigDiff < smallDiff ) done = 1; |
119 | + | else{ |
120 | + | molIndex--; |
121 | + | molLocal--; |
122 | + | atomIndex -= compStamps[compIndex]->getNAtoms(); |
123 | + | nLocal -= compStamps[compIndex]->getNAtoms(); |
124 | + | done = 1; |
125 | + | } |
126 | + | } |
127 | + | } |
128 | + | |
129 | + | if( i == mpiPlug->myNode ){ |
130 | + | mpiPlug->myMolEnd = (molIndex - 1); |
131 | + | mpiPlug->myAtomEnd = (atomIndex - 1); |
132 | + | mpiPlug->myNlocal = nLocal; |
133 | + | mpiPlug->myMol = molLocal; |
134 | + | } |
135 | + | |
136 | + | numerator = (double)( entryPlug->n_atoms - atomIndex ); |
137 | + | denominator = (double)( mpiPlug->numberProcessors - (i+1) ); |
138 | + | precast = numerator / denominator; |
139 | + | nTarget = (int)( precast + 0.5 ); |
140 | + | } |
141 | + | |
142 | + | if( mpiPlug->myNode == mpiPlug->numberProcessors-1 ){ |
143 | + | mpiPlug->myMolStart = molIndex; |
144 | + | mpiPlug->myAtomStart = atomIndex; |
145 | + | |
146 | + | nLocal = 0; |
147 | + | molLocal = 0; |
148 | + | while( compIndex < nComponents ){ |
149 | + | |
150 | + | if( (molIndex-compStart) >= componentsNmol[compIndex] ){ |
151 | + | compStart = molIndex; |
152 | + | compIndex++; |
153 | + | continue; |
154 | + | } |
155 | + | |
156 | + | nLocal += compStamps[compIndex]->getNAtoms(); |
157 | + | atomIndex += compStamps[compIndex]->getNAtoms(); |
158 | + | molIndex++; |
159 | + | molLocal++; |
160 | + | } |
161 | + | |
162 | + | mpiPlug->myMolEnd = (molIndex - 1); |
163 | + | mpiPlug->myAtomEnd = (atomIndex - 1); |
164 | + | mpiPlug->myNlocal = nLocal; |
165 | + | mpiPlug->myMol = molLocal; |
166 | + | } |
167 | + | |
168 | + | |
169 | + | MPI_Allreduce( &(mpiPlug->myNlocal), &testSum, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD ); |
170 | + | |
171 | + | if( mpiPlug->myNode == 0 ){ |
172 | + | if( testSum != entryPlug->n_atoms ){ |
173 | + | sprintf( painCave.errMsg, |
174 | + | "The sum of all nLocals, %d, did not equal the total number of atoms, %d.\n" |
175 | + | testSum, entryPlug->n_atoms ); |
176 | + | painCave.isFatal = 1; |
177 | + | simError(); |
178 | + | } |
179 | + | } |
180 | + | |
181 | + | sprintf( checkPointMsg, |
182 | + | "Successfully divided the molecules among the processors.\n" ); |
183 | + | MPIcheckPoint(); |
184 | + | |
185 | + | // let's create the identity array |
186 | + | } |
187 | + | |
188 | + | |
189 | + | void wrapSimParallel( void (*fSub)(mpiSimData*, int*, int*, int*) ){ |
190 | + | |
191 | + | mpiSim->setInternal( fSub ); |
192 | + | } |
– | Removed lines
+ | Added lines
< | Changed lines (previous revision)
> | Changed lines (new revision)
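The new `divideLabor()` above is the interesting part of this change: every node walks the component list and hands out whole molecules, recomputing the per-node atom target from whatever is still unassigned (remaining atoms divided by remaining processors, rounded), and it accepts an overshoot of that target only when the overshoot is smaller than the shortfall of stopping one molecule earlier; the last processor simply takes the remainder. The following standalone sketch of that balancing rule is for illustration only and assumes nothing from `mpiSimulation` itself; the names `Range`, `decompose`, and `molAtoms` are hypothetical, and `molAtoms[i]` is assumed to hold the atom count of molecule `i` in component order.

```cpp
#include <cstdio>
#include <vector>

// One processor's share of the molecule list (illustrative only).
struct Range { int molStart, molEnd, nAtomsLocal; };

std::vector<Range> decompose(const std::vector<int>& molAtoms, int nProcs) {
  std::vector<Range> ranges(nProcs);
  int molIndex = 0, atomsLeft = 0;
  for (int a : molAtoms) atomsLeft += a;

  for (int p = 0; p < nProcs; ++p) {
    // Re-target from the atoms still unassigned, as divideLabor() does when
    // it recomputes nTarget after filling each processor.
    int nTarget = (int)((double)atomsLeft / (double)(nProcs - p) + 0.5);
    int nLocal = 0, start = molIndex;

    // The last processor takes everything that remains.
    while (molIndex < (int)molAtoms.size() &&
           (p == nProcs - 1 || nLocal < nTarget)) {
      int next = nLocal + molAtoms[molIndex];
      // Accept an overshoot only if it lands closer to the target than
      // stopping short would (the smallDiff / bigDiff comparison above).
      if (p != nProcs - 1 && next > nTarget &&
          (next - nTarget) >= (nTarget - nLocal))
        break;
      nLocal = next;
      ++molIndex;
    }
    ranges[p] = { start, molIndex - 1, nLocal };
    atomsLeft -= nLocal;
  }
  return ranges;
}

int main() {
  // Three 3-atom molecules followed by two 10-atom molecules, split two ways.
  std::vector<int> molAtoms = { 3, 3, 3, 10, 10 };
  for (const Range& r : decompose(molAtoms, 2))
    std::printf("molecules %d..%d -> %d atoms\n",
                r.molStart, r.molEnd, r.nAtomsLocal);
  return 0;
}
```

Recomputing the target from the unassigned remainder, rather than fixing it once up front, keeps one oversized molecule early in the list from starving the processors that are filled later.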