root/group/trunk/OOPSE/libmdtools/ZConsWriter.cpp

Comparing trunk/OOPSE/libmdtools/ZConsWriter.cpp (file contents):
Revision 658 by tim, Thu Jul 31 15:35:07 2003 UTC vs.
Revision 738 by tim, Tue Sep 2 14:30:12 2003 UTC

# Line 7 | Line 7 | ZConsWriter::ZConsWriter(const char* filename)
7  
8   using namespace std;
9  
10 < ZConsWriter::ZConsWriter(const char* filename)
10 > ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
11   {
12    //use master-slave mode; only the master node writes to disk
13   #ifdef IS_MPI
# Line 18 | Line 18 | ZConsWriter::ZConsWriter(const char* filename)
18    
19     if(!output){
20       sprintf( painCave.errMsg,
21 <              "Could not open \"s\" for z constrain output \n",
22 <               filename);
21 >              "Could not open %s for z constrain output \n",
22 >         filename);
23       painCave.isFatal = 1;
24       simError();
25     }
26     output << "#number of z constrain molecules" << endl;
27 <   output << "#global Index of molecule\trefZ" << endl;
28 <
27 >   output << "#global Index of molecule\tzPos" << endl;
28 >   output << "#every frame will contain the data below" << endl;
29     output << "#time(fs)" << endl;
30 <   output << "#number of z constrain molecules" << endl;
31 <   output << "#global Index of molecule\tzconstrain force" << endl;
30 >   output << "#number of fixed z-constrain molecules" << endl;
31 >   output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;
32 >
33 >   parameters = thePara;
34 >   writeZPos();
35 >
36   #ifdef IS_MPI
37    }
38   #endif  
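
The revised constructor now receives a pointer to the vector of z-constraint parameters and, right after printing the comment header, records the fixed z positions by calling writeZPos(). A minimal usage sketch follows, assuming a serial (non-MPI) build, that ZConsWriter.hpp is the header declaring this class, and that ZConsParaItem is default-constructible with the public zconsIndex and zPos members used in writeZPos() later in this diff; the filename and values are illustrative only:

#include <vector>
#include "ZConsWriter.hpp"   // assumed header name for the class in this diff

int main() {
  // Two hypothetical z-constrained molecules: global molecule index and fixed z position.
  std::vector<ZConsParaItem> para(2);
  para[0].zconsIndex = 0;   para[0].zPos = 12.5;
  para[1].zconsIndex = 7;   para[1].zPos = -3.0;

  // Constructing the writer opens the file, writes the comment header,
  // and emits the fixed-z block via writeZPos().
  ZConsWriter writer("zconstrain-example.out", &para);
  return 0;
}

The per-frame data (time, molecule count, and one force/position line per molecule) are then appended by subsequent calls to writeFZ().
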
# Line 47 | Line 51 | void ZConsWriter::writeFZ(double time, int num, int* i
51   #endif
52   }
53  
54 < void ZConsWriter::writeFZ(double time, int num, int* index, double* fz)
55 < {
54 > /**
55 >  * Write the constraint force and current z position of every z-constrained molecule for one frame.
56 > */
57 > void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos){
58  
59   #ifndef IS_MPI
54  vector<pair<int, double> > data; // The space between the two ">" is needed. Otherwise, the compiler
55                                   // will take it as the redirect symbol ">>"
56  
57  for(int i = 0; i < num ; i++)
58    data.push_back(pair<int, double>(index[i], fz[i]));
59  
60    output << time << endl;
61    output << num << endl;
62    
63 <  //sort data by index
64 <  sort(data.begin(), data.end());
65 <  
66 <  for(int i=0; i < data.size(); i++)
67 <    output << data[i].first << "\t" << data[i].second << endl;
68 <    
69 < #else  
63 >  for(int i = 0; i < num; i++)
64 >    output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << endl;
65  
66 <  //master node will be responsible for receiving, assembling and writing data
67 <  if(worldRank == 0)
68 <  {
66 > #else
67 >  int totalNum;
68 >  MPI_Allreduce(&num, &totalNum, 1, MPI_INT,MPI_SUM, MPI_COMM_WORLD);
69    
70 <    vector<pair<int,double> > data;
71 <    int numProcessors;
72 <    int recvCount;
73 <    int* indexBuf;
74 <    double* fzBuf;
75 <    MPI_Status istatus;
76 <        
77 <    //process the data in master
78 <    for(int i=0; i < num; i++){    
79 <      data.push_back(pair<int, double>(index[i], fz[i]));
80 <    }
70 >  if(worldRank == 0){
71 >    output << time << endl;
72 >    output << totalNum << endl;
73 >  }
74 >  
75 >  int whichNode;
76 >  enum CommType { RequestPosAndForce, EndOfRequest } status;
77 >  double pos;
78 >  double force;
79 >  int localIndex;
80 >  MPI_Status ierr;
81 >  int tag = 0;
82 >  
83 >  if(worldRank == 0){
84      
85 <
86 <     numProcessors = mpiSim->getNumberProcessors();
85 >    int globalIndexOfCurMol;
86 >    int *MolToProcMap;
87 >    MolToProcMap = mpiSim->getMolToProcMap();
88      
89 <    //acquire the data from other nodes;
91 <    for(int whichNode = 1; whichNode < numProcessors; whichNode++){
92 <
89 >    for(int i = 0; i < parameters->size(); i++){
90        
91 <      MPI_Recv(&recvCount, 1, MPI_INT, whichNode,
92 <               0, MPI_COMM_WORLD, &istatus);          
91 >      globalIndexOfCurMol = (*parameters)[i].zconsIndex;
92 >      whichNode = MolToProcMap[globalIndexOfCurMol];
93        
94 <      if(recvCount > 0){
98 <      
99 <        indexBuf = new int[recvCount];
100 <        fzBuf = new double[recvCount];
94 >      if(whichNode == 0){
95          
96 <        if(!indexBuf || !fzBuf){
97 <          sprintf(painCave.errMsg,
98 <                  "Memory Allocation inside class ZConsWriter\n");
99 <          painCave.isFatal = 1;
100 <          simError();  
107 <        }    
108 <    
109 <        MPI_Recv(indexBuf, recvCount, MPI_INT, whichNode,
110 <                 0, MPI_COMM_WORLD, &istatus);
96 >       for(int j = 0; j < num; j++)
97 >        if(index[j] == globalIndexOfCurMol){
98 >          localIndex = j;
99 >          break;
100 >        }
101  
102 <        MPI_Recv(fzBuf, recvCount, MPI_DOUBLE_PRECISION, whichNode,
103 <                 0, MPI_COMM_WORLD, &istatus);
104 <                  
115 <        //assemble the data
116 <        for(int i = 0; i < recvCount; i++){
117 <          data.push_back(pair<int, double>(indexBuf[i], fzBuf[i]));
118 <        }      
119 <
120 <                
121 <        delete[] indexBuf;
122 <        delete[] fzBuf;
123 <                
102 >      force = fz[localIndex];
103 >      pos = curZPos[localIndex];
104 >      
105        }
106 <                      
107 <    }
106 >      else{
107 >        status = RequestPosAndForce;
108 >        MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
109 >        MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
110 >        MPI_Recv(&force, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
111 >        MPI_Recv(&pos, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
112 >      }
113 >
114 >     output << globalIndexOfCurMol << "\t" << force << "\t" << pos << endl;
115 >              
116 >    } //End of Request Loop
117      
118 <    // sort the data by index
119 <    sort(data.begin(), data.end());
118 >    //Send ending request message to slave nodes    
119 >    status = EndOfRequest;
120 >    for(int i =1; i < mpiSim->getNumberProcessors(); i++)
121 >      MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
122 >    
123 >  }
124 >  else{
125 >  
126 >    int whichMol;
127 >    bool done = false;
128 >
129 >    while (!done){  
130 >      
131 >      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);
132      
133 <    output << time << endl;
134 <    output << data.size() << endl;
133 >      switch (status){
134 >          
135 >         case RequestPosAndForce :
136 >          
137 >           MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD,&ierr);
138      
139 <    for(int i = 0; i < data.size(); i++){
139 >           for(int i = 0; i < num; i++)
140 >           if(index[i] == whichMol){
141 >             localIndex = i;
142 >             break;
143 >           }
144      
145 <      output << data[i].first << "\t" << data[i].second << endl;
145 >           MPI_Send(&fz[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);    
146 >           MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);      
147 >           break;
148 >      
149 >        case EndOfRequest :
150 >        
151 >         done = true;
152 >         break;
153 >      }
154 >      
155      }
156 <    
139 <  }  
140 <  else
141 <  {
142 <    MPI_Send(&num, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
143 <
144 <    if(num > 0){
145 <      MPI_Send(index, num, MPI_INT, 0, 0, MPI_COMM_WORLD);
146 <      MPI_Send(fz, num, MPI_DOUBLE_PRECISION, 0, 0, MPI_COMM_WORLD);
147 <    }
156 >          
157    }
158  
159   #endif
160  
161   }
162  
163 < void ZConsWriter::writeRefZ(const vector<int>& index, const vector<double>& refZ){
163 > /*
164 >  * Write the global index and fixed z position of every z-constrained molecule (the header block of the output file).
165 > */
166 > void ZConsWriter::writeZPos(){
167  
168   #ifdef IS_MPI
169    if(worldRank == 0){
170   #endif
171      
172 <     output << index.size() << endl;    
172 >    output << parameters->size() << endl;    
173      
174 <    for(int i =0 ; i < index.size(); i++)
175 <      output << index[i] << "\t" << refZ[i] << endl;
174 >    for(int i =0 ; i < parameters->size(); i++)
175 >      output << (*parameters)[i].zconsIndex << "\t" <<  (*parameters)[i].zPos << endl;
176  
177   #ifdef IS_MPI
178    }
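
In the MPI build, writeFZ() now uses a master/slave request-reply scheme: rank 0 walks the z-constraint parameter list, looks up the owning node with mpiSim->getMolToProcMap(), asks that node for the constraint force and current z position, writes the line itself, and finally sends an EndOfRequest message to every slave so they can leave their receive loops. The stripped-down, self-contained sketch below shows just that communication pattern; names such as REQUEST, DONE, and lookupLocalValue are illustrative rather than taken from OOPSE, and MPI_DOUBLE is used where the code above uses the Fortran-style MPI_DOUBLE_PRECISION.

#include <mpi.h>
#include <cstdio>

enum CommType { REQUEST = 0, DONE = 1 };

// Hypothetical stand-in for the fz[localIndex] / curZPos[localIndex] lookups above.
static double lookupLocalValue(int globalIndex) { return 0.1 * globalIndex; }

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank, nproc;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &nproc);

  const int tag = 0;
  MPI_Status status;

  if (rank == 0) {
    // Master: request one value from each slave, write it, then tell everyone to stop.
    for (int node = 1; node < nproc; node++) {
      int msg = REQUEST;
      int globalIndex = node;          // which molecule is wanted (illustrative)
      double value;
      MPI_Send(&msg, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
      MPI_Send(&globalIndex, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
      MPI_Recv(&value, 1, MPI_DOUBLE, node, tag, MPI_COMM_WORLD, &status);
      printf("%d\t%f\n", globalIndex, value);   // only the master writes output
    }
    int msg = DONE;
    for (int node = 1; node < nproc; node++)
      MPI_Send(&msg, 1, MPI_INT, node, tag, MPI_COMM_WORLD);
  } else {
    // Slave: answer requests until the master signals the end of the frame.
    bool done = false;
    while (!done) {
      int msg;
      MPI_Recv(&msg, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
      if (msg == REQUEST) {
        int globalIndex;
        MPI_Recv(&globalIndex, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &status);
        double value = lookupLocalValue(globalIndex);
        MPI_Send(&value, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
      } else {
        done = true;
      }
    }
  }

  MPI_Finalize();
  return 0;
}

Funneling all output through rank 0 keeps the file ordered by the z-constraint list and avoids concurrent writes, at the cost of serializing one small message exchange per molecule per frame.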

Diff Legend

Removed lines
+ Added lines
< Changed lines (old revision)
> Changed lines (new revision)