#include <algorithm>
#include <cstdio>
#include <iostream>
#include <vector>

#include "io/ZConsWriter.hpp"
#include "utils/simError.h"
11 |
/**
 * @brief Open the z-constraint output file and write the header plus the
 *        initial list of constrained molecules.
 *
 * Uses master/slave mode: under MPI only the master node (rank 0) opens
 * the file and writes to disk.
 *
 * @param filename path of the z-constraint output file
 * @param thePara  list of z-constraint parameters (global index / target z);
 *                 the pointer is stored, not copied — must outlive this writer
 */
ZConsWriter::ZConsWriter(const char* filename, std::vector<ZConsParaItem>* thePara)
{
  //use master - slave mode, only master node writes to disk
#ifdef IS_MPI
  if(worldRank == 0){
#endif

    output.open(filename);

    if(!output){
      // snprintf instead of sprintf: a long filename must not overflow
      // the fixed-size errMsg buffer
      snprintf(painCave.errMsg, sizeof(painCave.errMsg),
               "Could not open %s for z constrain output \n",
               filename);
      painCave.isFatal = 1;
      simError();
    }

    // self-describing header for the file format
    output << "#number of z constrain molecules" << std::endl;
    output << "#global Index of molecule\tzPos" << std::endl;
    output << "#every frame will contain below data" <<std::endl;
    output << "#time(fs)" << std::endl;
    output << "#number of fixed z-constrain molecules" << std::endl;
    output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << std::endl;

    parameters = thePara;
    writeZPos();

#ifdef IS_MPI
  }
#endif

}
42 |
|
43 |
/// Close the output stream; under MPI only the master node (rank 0)
/// owns the file, so only it performs the close.
ZConsWriter::~ZConsWriter() {
#ifdef IS_MPI
  if (worldRank == 0) {
#endif
    output.close();
#ifdef IS_MPI
  }
#endif
}
54 |
|
55 |
/** |
56 |
* |
57 |
*/ |
58 |
void ZConsWriter::writeFZ(SimInfo* info, double time, int num, int* index, double* fz, double* curZPos, double* zpos){ |
59 |
|
60 |
#ifndef IS_MPI |
61 |
output << time << std::endl; |
62 |
output << num << std::endl; |
63 |
|
64 |
for(int i = 0; i < num; i++) |
65 |
output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << "\t" << zpos[i] <<std::endl; |
66 |
|
67 |
#else |
68 |
int totalNum; |
69 |
MPI_Allreduce(&num, &totalNum, 1, MPI_INT,MPI_SUM, MPI_COMM_WORLD); |
70 |
|
71 |
if(worldRank == 0){ |
72 |
output << time << std::endl; |
73 |
output << totalNum << std::endl; |
74 |
} |
75 |
|
76 |
int whichNode; |
77 |
enum CommType { RequesPosAndForce, EndOfRequest} status; |
78 |
double pos; |
79 |
double force; |
80 |
double zconsPos; |
81 |
int localIndex; |
82 |
MPI_Status ierr; |
83 |
int tag = 0; |
84 |
|
85 |
if(worldRank == 0){ |
86 |
|
87 |
int globalIndexOfCurMol; |
88 |
|
89 |
for(int i = 0; i < (int)(parameters->size()); i++){ |
90 |
|
91 |
globalIndexOfCurMol = (*parameters)[i].zconsIndex; |
92 |
whichNode = info->getMolToProc(globalIndexOfCurMol); |
93 |
|
94 |
if(whichNode == 0){ |
95 |
|
96 |
for(int j = 0; j < num; j++) |
97 |
if(index[j] == globalIndexOfCurMol){ |
98 |
localIndex = j; |
99 |
break; |
100 |
} |
101 |
|
102 |
force = fz[localIndex]; |
103 |
pos = curZPos[localIndex]; |
104 |
|
105 |
} |
106 |
else{ |
107 |
status = RequesPosAndForce; |
108 |
MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD); |
109 |
MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD); |
110 |
MPI_Recv(&force, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr); |
111 |
MPI_Recv(&pos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr); |
112 |
MPI_Recv(&zconsPos, 1, MPI_DOUBLE, whichNode, tag, MPI_COMM_WORLD, &ierr); |
113 |
} |
114 |
|
115 |
output << globalIndexOfCurMol << "\t" << force << "\t" << pos << "\t"<< zconsPos << std::endl; |
116 |
|
117 |
} //End of Request Loop |
118 |
|
119 |
//Send ending request message to slave nodes |
120 |
status = EndOfRequest; |
121 |
|
122 |
int nproc; |
123 |
MPI_Comm_size(MPI_COMM_WORLD, &nproc); |
124 |
|
125 |
for(int i =1; i < nproc; i++) |
126 |
MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD); |
127 |
|
128 |
} |
129 |
else{ |
130 |
|
131 |
int whichMol; |
132 |
bool done = false; |
133 |
|
134 |
while (!done){ |
135 |
|
136 |
MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr); |
137 |
|
138 |
switch (status){ |
139 |
|
140 |
case RequesPosAndForce : |
141 |
|
142 |
MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD,&ierr); |
143 |
|
144 |
for(int i = 0; i < num; i++) |
145 |
if(index[i] == whichMol){ |
146 |
localIndex = i; |
147 |
break; |
148 |
} |
149 |
|
150 |
MPI_Send(&fz[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD); |
151 |
MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD); |
152 |
MPI_Send(&zpos[localIndex], 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD); |
153 |
break; |
154 |
|
155 |
case EndOfRequest : |
156 |
|
157 |
done = true; |
158 |
break; |
159 |
} |
160 |
|
161 |
} |
162 |
|
163 |
} |
164 |
|
165 |
#endif |
166 |
|
167 |
} |
168 |
|
169 |
/* |
170 |
* |
171 |
*/ |
172 |
void ZConsWriter::writeZPos(){ |
173 |
|
174 |
#ifdef IS_MPI |
175 |
if(worldRank == 0){ |
176 |
#endif |
177 |
|
178 |
output << parameters->size() << std::endl; |
179 |
|
180 |
for(int i =0 ; i < (int)(parameters->size()); i++) |
181 |
output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << std::endl; |
182 |
|
183 |
#ifdef IS_MPI |
184 |
} |
185 |
#endif |
186 |
} |