root/group/trunk/OOPSE/libmdtools/ZConsWriter.cpp

Comparing trunk/OOPSE/libmdtools/ZConsWriter.cpp (file contents):
Revision 658 by tim, Thu Jul 31 15:35:07 2003 UTC vs.
Revision 699 by tim, Fri Aug 15 19:24:13 2003 UTC
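
In short, revision 699 reworks the writer around an explicit parameter list:
the constructor now receives the z-constraint parameters and writes the
reference positions itself, writeFZ() reports each molecule's current z
position alongside the constraint force, and under MPI the master queries the
node that owns each molecule instead of gathering whole per-node arrays. The
interface change, reconstructed from this diff alone (the header file is not
shown here, so treat this as an approximation):

    // Rev 658 (old):
    ZConsWriter(const char* filename);
    void writeFZ(double time, int num, int* index, double* fz);
    void writeRefZ(const vector<int>& index, const vector<double>& refZ);

    // Rev 699 (new):
    ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara);
    void writeFZ(double time, int num, int* index, double* fz, double* curZPos);
    void writeZPos();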

# Line 7 | Line 7 | ZConsWriter::ZConsWriter(const char* filename)
7  
8   using namespace std;
9  
10 < ZConsWriter::ZConsWriter(const char* filename)
10 > ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
11   {
12    //use master-slave mode; only the master node writes to disk
13   #ifdef IS_MPI
# Line 24 | Line 24 | ZConsWriter::ZConsWriter(const char* filename)
24       simError();
25     }
26     output << "#number of z constrain molecules" << endl;
27 <   output << "#global Index of molecule\trefZ" << endl;
27 >   output << "#global Index of molecule\tzPos" << endl;
28  
29 +   parameters = thePara;
30 +   writeZPos();
31 +
32     output << "#time(fs)" << endl;
33 <   output << "#number of z constrain molecules" << endl;
34 <   output << "#global Index of molecule\tzconstrain force" << endl;
33 >   output << "#number of fixed z-constrain molecules" << endl;
34 >   output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;
35 >
36 >
37   #ifdef IS_MPI
38    }
39   #endif  
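
The new constructor stores the parameter list and immediately dumps the
reference positions through writeZPos(). A minimal usage sketch; only the
zconsIndex and zPos members of ZConsParaItem are visible in this diff, and
the filename and values below are illustrative:

    vector<ZConsParaItem> para;

    ZConsParaItem item;
    item.zconsIndex = 12;   // global index of a z-constrained molecule
    item.zPos = 25.6;       // its reference z position (illustrative)
    para.push_back(item);

    // Writes the header and one "index \t zPos" line per molecule up
    // front, before any writeFZ() call.
    ZConsWriter writer("zcons.out", &para);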
# Line 47 | Line 52 | void ZConsWriter::writeFZ(double time, int num, int* i
52   #endif
53   }
54  
55 < void ZConsWriter::writeFZ(double time, int num, int* index, double* fz)
55 > void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos)
56   {
57  
58   #ifndef IS_MPI
54  vector<pair<int, double> > data; // The space between the two ">" is needed; otherwise the
55                                   // compiler will parse them as the ">>" operator
56  
57  for(int i = 0; i < num; i++)
58    data.push_back(pair<int, double>(index[i], fz[i]));
59  
59    output << time << endl;
60    output << num << endl;
61 <  
62 <  //sort data by index
63 <  sort(data.begin(), data.end());
65 <  
66 <  for(int i=0; i < data.size(); i++)
67 <    output << data[i].first << "\t" << data[i].second << endl;
68 <    
69 < #else  
61 >        
62 >  for(int i = 0; i < num; i++)
63 >    output << index[i] << "\t" << fz[i] << "\t" << curZPos[i] << endl;
64  
65 <  //master node will be responsible for receiving, assembling and writing data
66 <  if(worldRank == 0)
67 <  {
65 > #else
66 >
67 >  int whichNode;
68 >  enum CommType { RequestPosAndForce, EndOfRequest } status;
69 >  double pos;
70 >  double force;
71 >  int localIndex;
72 >  MPI_Status ierr;
73 >  int tag = 0;
74    
75 <    vector<pair<int,double> > data;
76 <    int numProcessors;
77 <    int recvCount;
78 <    int* indexBuf;
79 <    double* fzBuf;
80 <    MPI_Status istatus;
81 <        
82 <    //process the data in master
83 <    for(int i=0; i < num; i++){    
84 <      data.push_back(pair<int, double>(index[i], fz[i]));
85 <    }
75 >  if(worldRank == 0){
76      
77 <
78 <     numProcessors = mpiSim->getNumberProcessors();
77 >    int globalIndexOfCurMol;
78 >    int *MolToProcMap;
79 >    MolToProcMap = mpiSim->getMolToProcMap();
80      
81 <    //acquire the data from other nodes;
91 <    for(int whichNode = 1; whichNode < numProcessors; whichNode++){
92 <
81 >    for(int i = 0; i < parameters->size(); i++){
82        
83 <      MPI_Recv(&recvCount, 1, MPI_INT, whichNode,
84 <               0, MPI_COMM_WORLD, &istatus);          
83 >      globalIndexOfCurMol = (*parameters)[i].zconsIndex;
84 >      whichNode = MolToProcMap[globalIndexOfCurMol];
85        
86 <      if(recvCount > 0){
98 <      
99 <        indexBuf = new int[recvCount];
100 <        fzBuf = new double[recvCount];
86 >      if(whichNode == 0){
87          
88 <        if(!indexBuf || !fzBuf){
89 <          sprintf(painCave.errMsg,
90 <                  "Memory Allocation inside class ZConsWriter\n");
91 <          painCave.isFatal = 1;
92 <          simError();  
107 <        }    
108 <    
109 <        MPI_Recv(indexBuf, recvCount, MPI_INT, whichNode,
110 <                 0, MPI_COMM_WORLD, &istatus);
88 >        for(int j = 0; j < num; j++)
89 >          if(index[j] == globalIndexOfCurMol){
90 >            localIndex = j;
91 >            break;
92 >          }
93  
94 <        MPI_Recv(fzBuf, recvCount, MPI_DOUBLE_PRECISION, whichNode,
95 <                 0, MPI_COMM_WORLD, &istatus);
94 >        force = fz[localIndex];
95 >        pos = curZPos[localIndex];
96                    
115        //assemble the data
116        for(int i = 0; i < recvCount; i++){
117          data.push_back(pair<int, double>(indexBuf[i], fzBuf[i]));
118        }      
119
120                
121        delete[] indexBuf;
122        delete[] fzBuf;
123                
97        }
98 <                      
99 <    }
98 >      else{
99 >        status = RequestPosAndForce;
100 >        MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
101 >        MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
102 >        MPI_Recv(&force, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
103 >        MPI_Recv(&pos, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
104 >      }
105 >
106 >      output << globalIndexOfCurMol << "\t" << force << "\t" << pos << endl;
107 >              
108 >    } //End of Request Loop
109      
110 <    // sort the data by index
111 <    sort(data.begin(), data.end());
110 >    //Send ending request message to slave nodes    
111 >    status = EndOfRequest;
112 >    for(int i = 1; i < mpiSim->getNumberProcessors(); i++)
113 >      MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
114 >    
115 >  }
116 >  else{
117 >  
118 >    int whichMol;
119 >    bool done = false;
120 >
121 >    while (!done){  
122 >      
123 >      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);
124      
125 <    output << time << endl;
126 <    output << data.size() << endl;
127 <    
128 <    for(int i = 0; i < data.size(); i++){
129 <    
130 <      output << data[i].first << "\t" << data[i].second << endl;
125 >      switch (status){
126 >          
127 >        case RequestPosAndForce :
128 >          
129 >          MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);
130 >          
131 >          for(int i = 0; i < num; i++)
132 >            if(index[i] == whichMol){
133 >              localIndex = i;
134 >              break;
135 >            }
136 >          
137 >          MPI_Send(&fz[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
138 >          MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
139 >          break;
140 >            
141 >        case EndOfRequest :
142 >        
143 >          done = true;
144 >          break;
145 >      }
146 >      
147      }
148 <    
139 <  }  
140 <  else
141 <  {
142 <    MPI_Send(&num, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
143 <
144 <    if(num > 0){
145 <      MPI_Send(index, num, MPI_INT, 0, 0, MPI_COMM_WORLD);
146 <      MPI_Send(fz, num, MPI_DOUBLE_PRECISION, 0, 0, MPI_COMM_WORLD);
147 <    }
148 >          
149    }
150  
151   #endif
152  
153   }
154  
155 < void ZConsWriter::writeRefZ(const vector<int>& index, const vector<double>& refZ){
155 > void ZConsWriter::writeZPos(){
156  
157   #ifdef IS_MPI
158    if(worldRank == 0){
159   #endif
160      
161 <     output << index.size() << endl;    
161 >     output << parameters->size() << endl;    
162      
163 <    for(int i = 0; i < index.size(); i++)
164 <      output << index[i] << "\t" << refZ[i] << endl;
163 >    for(int i = 0; i < parameters->size(); i++)
164 >      output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;
165  
166   #ifdef IS_MPI
167    }
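
For reference, the request/reply pattern that the new writeFZ() introduces can
be exercised in isolation. Below is a simplified standalone sketch: the
molecule-to-node map and the force/position values are stand-ins for OOPSE's
mpiSim->getMolToProcMap() bookkeeping, and MPI_DOUBLE replaces the diff's
MPI_DOUBLE_PRECISION (the latter is the Fortran datatype handle; the C binding
conventionally pairs double with MPI_DOUBLE):

    #include <mpi.h>
    #include <cstdio>

    enum CommType { RequestPosAndForce, EndOfRequest };

    int main(int argc, char** argv){
      MPI_Init(&argc, &argv);
      int rank, size;
      MPI_Comm_rank(MPI_COMM_WORLD, &rank);
      MPI_Comm_size(MPI_COMM_WORLD, &size);

      const int tag = 0;
      const int nMol = 4;            // stand-in for parameters->size()
      MPI_Status istatus;

      if(rank == 0){
        for(int mol = 0; mol < nMol; mol++){
          int owner = mol % size;    // stand-in for the MolToProcMap lookup
          double force, pos;
          if(owner == 0){            // molecule lives on the master itself
            force = mol;
            pos = 10.0 * mol;
          }
          else{                      // ask the owning node for its data
            int status = RequestPosAndForce;
            MPI_Send(&status, 1, MPI_INT, owner, tag, MPI_COMM_WORLD);
            MPI_Send(&mol, 1, MPI_INT, owner, tag, MPI_COMM_WORLD);
            MPI_Recv(&force, 1, MPI_DOUBLE, owner, tag, MPI_COMM_WORLD, &istatus);
            MPI_Recv(&pos, 1, MPI_DOUBLE, owner, tag, MPI_COMM_WORLD, &istatus);
          }
          printf("%d\t%f\t%f\n", mol, force, pos);
        }
        int status = EndOfRequest;   // release the slaves' receive loops
        for(int i = 1; i < size; i++)
          MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
      }
      else{
        bool done = false;
        while(!done){
          int status;
          MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &istatus);
          if(status == RequestPosAndForce){
            int mol;
            MPI_Recv(&mol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &istatus);
            double force = mol, pos = 10.0 * mol;   // stand-in local values
            MPI_Send(&force, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
            MPI_Send(&pos, 1, MPI_DOUBLE, 0, tag, MPI_COMM_WORLD);
          }
          else{                      // EndOfRequest
            done = true;
          }
        }
      }

      MPI_Finalize();
      return 0;
    }

The per-molecule round trip trades bandwidth for simplicity: the master pays
one request/reply latency per constrained molecule per frame, which is cheap
for the handful of molecules a z-constraint typically involves, and it
removes the old version's per-node receive buffers and post-hoc sort.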

Diff Legend

  (no marker)  Removed lines
  +            Added lines
  <            Changed lines (revision 658)
  >            Changed lines (revision 699)