--- trunk/OOPSE/libmdtools/ZConsWriter.cpp 2003/07/31 15:35:07 658
+++ trunk/OOPSE/libmdtools/ZConsWriter.cpp 2003/08/15 19:24:13 699
@@ -7,7 +7,7 @@ ZConsWriter::ZConsWriter(const char* filename)
 using namespace std;
 
-ZConsWriter::ZConsWriter(const char* filename)
+ZConsWriter::ZConsWriter(const char* filename, vector<ZConsParaItem>* thePara)
 {
   //use master - slave mode, only master node writes to disk
 #ifdef IS_MPI
@@ -24,11 +24,16 @@ ZConsWriter::ZConsWriter(const char* filename)
     simError();
   }
 
   output << "#number of z constrain molecules" << endl;
-  output << "#global Index of molecule\trefZ" << endl;
+  output << "#global Index of molecule\tzPos" << endl;
+  parameters = thePara;
+  writeZPos();
+
   output << "#time(fs)" << endl;
-  output << "#number of z constrain molecules" << endl;
-  output << "#global Index of molecule\tzconstrain force" << endl;
+  output << "#number of fixed z-constrain molecules" << endl;
+  output << "#global Index of molecule\tzconstrain force\tcurrentZPos" << endl;
+
+
 #ifdef IS_MPI
   }
 #endif
@@ -47,120 +52,116 @@ void ZConsWriter::writeFZ(double time, int num, int* i
 #endif
 }
 
-void ZConsWriter::writeFZ(double time, int num, int* index, double* fz)
+void ZConsWriter::writeFZ(double time, int num, int* index, double* fz, double* curZPos)
 {
 #ifndef IS_MPI
-  vector<pair<int, double> > data; // The space between two ">" is needed. Otherwise, compileer
-                                   // will take it as redirect symbol ">>"
-
-  for(int i = 0; i < num ; i++)
-    data.push_back(pair<int, double>(index[i], fz[i]));
-
   output << time << endl;
   output << num << endl;
-
-  //sort data by index
-  sort(data.begin(), data.end());
-
-  for(int i=0; i < data.size(); i++)
-    output << data[i].first << "\t" << data[i].second << endl;
-
-#else
+
+  for(int i = 0; i < num; i++)
+    output << index[i] <<"\t" << fz[i] << "\t" << curZPos[i] << endl;
-  //master node will be responsible for receiving, assembling and writing data
-  if(worldRank == 0)
-  {
+#else
+
+  int whichNode;
+  enum CommType { RequesPosAndForce, EndOfRequest} status;
+  double pos;
+  double force;
+  int localIndex;
+  MPI_Status ierr;
+  int tag = 0;
-    vector<pair<int, double> > data;
-    int numProcessors;
-    int recvCount;
-    int* indexBuf;
-    double* fzBuf;
-    MPI_Status istatus;
-
-    //process the data in master
-    for(int i=0; i < num; i++){
-      data.push_back(pair<int, double>(index[i], fz[i]));
-    }
+  if(worldRank == 0){
-
-    numProcessors = mpiSim->getNumberProcessors();
+    int globalIndexOfCurMol;
+    int *MolToProcMap;
+    MolToProcMap = mpiSim->getMolToProcMap();
-    //acquire the data from other nodes;
-    for(int whichNode = 1; whichNode < numProcessors; whichNode++){
-
+    for(int i = 0; i < parameters->size(); i++){
-      MPI_Recv(&recvCount, 1, MPI_INT, whichNode,
-               0, MPI_COMM_WORLD, &istatus);
+      globalIndexOfCurMol = (*parameters)[i].zconsIndex;
+      whichNode = MolToProcMap[globalIndexOfCurMol];
-      if(recvCount > 0){
-
-        indexBuf = new int[recvCount];
-        fzBuf = new double[recvCount];
+      if(whichNode == 0){
-        if(!indexBuf || !fzBuf){
-          sprintf(painCave.errMsg,
-                  "Memory Allocation inside class ZConsWriter\n");
-          painCave.isFatal = 1;
-          simError();
-        }
-
-        MPI_Recv(indexBuf, recvCount, MPI_INT, whichNode,
-                 0, MPI_COMM_WORLD, &istatus);
+        for(int j = 0; j < num; j++)
+          if(index[j] == globalIndexOfCurMol){
+            localIndex = j;
+            break;
+          }
-        MPI_Recv(fzBuf, recvCount, MPI_DOUBLE_PRECISION, whichNode,
-                 0, MPI_COMM_WORLD, &istatus);
+        force = fz[localIndex];
+        pos = curZPos[localIndex];
-        //assemble the data
-        for(int i = 0; i < recvCount; i++){
-          data.push_back(pair<int, double>(indexBuf[i], fzBuf[i]));
-        }
-
-
-        delete[] indexBuf;
-        delete[] fzBuf;
-      }
-
-    }
+      }
+      else{
+        status = RequesPosAndForce;
+        MPI_Send(&status, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
+        MPI_Send(&globalIndexOfCurMol, 1, MPI_INT, whichNode, tag, MPI_COMM_WORLD);
+        MPI_Recv(&force, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
+        MPI_Recv(&pos, 1, MPI_DOUBLE_PRECISION, whichNode, tag, MPI_COMM_WORLD, &ierr);
+      }
+
+      output << globalIndexOfCurMol << "\t" << force << "\t" << pos << endl;
+
+    } //End of Request Loop
-    // sort the data by index
-    sort(data.begin(), data.end());
+    //Send ending request message to slave nodes
+    status = EndOfRequest;
+    for(int i =1; i < mpiSim->getNumberProcessors(); i++)
+      MPI_Send(&status, 1, MPI_INT, i, tag, MPI_COMM_WORLD);
+
+  }
+  else{
+
+    int whichMol;
+    bool done = false;
+
+    while (!done){
+
+      MPI_Recv(&status, 1, MPI_INT, 0, tag, MPI_COMM_WORLD, &ierr);
-    output << time << endl;
-    output << data.size() << endl;
-
-    for(int i = 0; i < data.size(); i++){
-
-      output << data[i].first << "\t" << data[i].second << endl;
+      switch (status){
+
+        case RequesPosAndForce :
+
+          MPI_Recv(&whichMol, 1, MPI_INT, 0, tag, MPI_COMM_WORLD,&ierr);
+
+          for(int i = 0; i < num; i++)
+            if(index[i] == whichMol){
+              localIndex = i;
+              break;
+            }
+
+          MPI_Send(&fz[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
+          MPI_Send(&curZPos[localIndex], 1, MPI_DOUBLE_PRECISION, 0, tag, MPI_COMM_WORLD);
+          break;
+
+        case EndOfRequest :
+
+          done = true;
+          break;
+      }
+    }
-
-    }
-  }
-  else
-  {
-    MPI_Send(&num, 1, MPI_INT, 0, 0, MPI_COMM_WORLD);
-
-    if(num > 0){
-      MPI_Send(index, num, MPI_INT, 0, 0, MPI_COMM_WORLD);
-      MPI_Send(fz, num, MPI_DOUBLE_PRECISION, 0, 0, MPI_COMM_WORLD);
-    }
+  }
 
 #endif
 }
 
-void ZConsWriter::writeRefZ(const vector<int>& index, const vector<double>& refZ){
+void ZConsWriter::writeZPos(){
 
 #ifdef IS_MPI
   if(worldRank == 0){
 #endif
 
-  output << index.size() << endl;
+  output << parameters->size() << endl;
 
-  for(int i =0 ; i < index.size(); i++)
-    output << index[i] << "\t" << refZ[i] << endl;
+  for(int i =0 ; i < parameters->size(); i++)
+    output << (*parameters)[i].zconsIndex << "\t" << (*parameters)[i].zPos << endl;
 
 #ifdef IS_MPI
   }
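
The new constructor stores a pointer to a vector of per-molecule z-constraint parameter records (`parameters`), and `writeZPos()` prints a global molecule index and a target z position from each entry. A minimal sketch of what one such record might look like, assuming the name `ZConsParaItem` and only the two fields this file actually reads:

// Hypothetical sketch, not taken from this diff: per-molecule z-constraint
// record held in the vector passed to the ZConsWriter constructor.
// Only the two fields that writeZPos() prints are shown.
struct ZConsParaItem {
  int    zconsIndex;  // global index of the z-constrained molecule
  double zPos;        // constrained z position written next to that index
};

// The writer would then keep: std::vector<ZConsParaItem>* parameters;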