 35     *
 36     * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 37     * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 38 <   * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 38 >   * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 234107 (2008).
 39     * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
 40     * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
 41     */

 44     * @file SimCreator.cpp
 45     * @author tlin
 46     * @date 11/03/2004
 47 -   * @time 13:51am
 47     * @version 1.0
 48     */
 49     #include <exception>

 55     #include "brains/SimCreator.hpp"
 56     #include "brains/SimSnapshotManager.hpp"
 57     #include "io/DumpReader.hpp"
 58 <   #include "UseTheForce/ForceFieldFactory.hpp"
 58 >   #include "brains/ForceField.hpp"
 59     #include "utils/simError.h"
 60     #include "utils/StringUtils.hpp"
 61     #include "math/SeqRandNumGen.hpp"

 99     #ifdef IS_MPI
100     int streamSize;
101     const int masterNode = 0;
102 <   int commStatus;
102 >
103     if (worldRank == masterNode) {
104 <   commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
104 >   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
105     #endif
106     SimplePreprocessor preprocessor;
107     preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);

109     #ifdef IS_MPI
110     //broadcasting the stream size
111     streamSize = ppStream.str().size() +1;
112 <   commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
113 <
114 <   commStatus = MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
116 <
117 <
112 >   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
113 >   MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
114 >
115     } else {
116 +   MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
117
120 -   commStatus = MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
121 -
118     //get stream size
119 <   commStatus = MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
119 >   MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
120
121     char* buf = new char[streamSize];
122     assert(buf);
123
124     //receive file content
125 <   commStatus = MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
125 >   MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
126
127     ppStream.str(buf);
128     delete [] buf;
133 -
129     }
130     #endif
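
Note: the hunks above move this broadcast block from the C MPI interface (MPI_Bcast with an explicit communicator argument and a returned status code) to the MPI-2 C++ bindings, which also lets the now-unused commStatus variable disappear. A minimal standalone sketch of the two call styles, assuming an MPI installation that still ships the C++ bindings (they were deprecated in MPI-2.2 and removed in MPI-3):

    #include <mpi.h>
    #include <iostream>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      int mdFileVersion = 0;
      if (MPI::COMM_WORLD.Get_rank() == 0) mdFileVersion = 2;

      // C bindings: free function, communicator passed last, error code returned.
      //   MPI_Bcast(&mdFileVersion, 1, MPI_INT, 0, MPI_COMM_WORLD);

      // C++ bindings: method on the communicator object; no error code is returned.
      MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, 0);

      std::cout << "rank " << MPI::COMM_WORLD.Get_rank()
                << " sees mdFileVersion " << mdFileVersion << std::endl;
      MPI::Finalize();
      return 0;
    }
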
131     // Create a scanner that reads from the input stream

250     std::string mdRawData;
251     int metaDataBlockStart = -1;
252     int metaDataBlockEnd = -1;
253 <   int i;
254 <   int mdOffset;
253 >   int i, j;
254 >   streamoff mdOffset;
255     int mdFileVersion;
256
257 +   // Create a string for embedding the version information in the MetaData
258 +   std::string version;
259 +   version.assign("## Last run using OpenMD Version: ");
260 +   version.append(OPENMD_VERSION_MAJOR);
261 +   version.append(".");
262 +   version.append(OPENMD_VERSION_MINOR);
263 +
264 +   std::string svnrev;
265 +   //convert a compiler macro to a string in C++
266 +   STR_DEFINE(svnrev, SVN_REV );
267 +   version.append(" Revision: ");
268 +   // If there's no SVN revision, just call this the RELEASE revision.
269 +   if (!svnrev.empty()) {
270 +   version.append(svnrev);
271 +   } else {
272 +   version.append("RELEASE");
273 +   }
274 +
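
Note: the new block above stamps the MetaData with the OpenMD version and Subversion revision that produced it. STR_DEFINE is an OpenMD helper whose definition is not part of this diff; turning a -D compile-line macro into a std::string is commonly done with a two-stage stringification macro, as in this illustrative sketch (the helper names and macro value below are made up):

    #include <iostream>
    #include <string>

    #define STRINGIFY(x) #x
    #define EXPAND_AND_STRINGIFY(x) STRINGIFY(x)

    // Stand-in for a revision the build system would normally pass, e.g. -DSVN_REV=1553
    #ifndef SVN_REV
    #define SVN_REV 1553
    #endif

    int main() {
      // Two stages are needed so that SVN_REV is expanded before it is stringified.
      std::string svnrev(EXPAND_AND_STRINGIFY(SVN_REV));   // "1553", not "SVN_REV"

      std::string version("## Last run using OpenMD Version: 2.0");
      version.append(" Revision: ");
      version.append(svnrev.empty() ? std::string("RELEASE") : svnrev);
      std::cout << version << std::endl;
      return 0;
    }
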
275     #ifdef IS_MPI
276     const int masterNode = 0;
277     if (worldRank == masterNode) {
278     #endif
279
280 <   std::ifstream mdFile_(mdFileName.c_str());
280 >   std::ifstream mdFile_;
281 >   mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
282
283     if (mdFile_.fail()) {
284     sprintf(painCave.errMsg,

366
367     mdRawData.clear();
368
369 +   bool foundVersion = false;
370 +
371     for (int i = 0; i < metaDataBlockEnd - metaDataBlockStart - 1; ++i) {
372     mdFile_.getline(buffer, bufferSize);
373 <   mdRawData += buffer;
373 >   std::string line = trimLeftCopy(buffer);
374 >   j = CaseInsensitiveFind(line, "## Last run using OpenMD Version");
375 >   if (static_cast<size_t>(j) != string::npos) {
376 >   foundVersion = true;
377 >   mdRawData += version;
378 >   } else {
379 >   mdRawData += buffer;
380 >   }
381     mdRawData += "\n";
382     }
383 <
383 >
384 >   if (!foundVersion) mdRawData += version + "\n";
385 >
386     mdFile_.close();
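
Note: with this change the MetaData block is copied line by line, and any existing "## Last run using OpenMD Version" stamp is replaced by the freshly built version string (or appended after the loop if none was found). trimLeftCopy and CaseInsensitiveFind are OpenMD string utilities; the sketch below reproduces the same scan-and-replace idea using only the standard library, with a made-up stand-in for the case-insensitive match:

    #include <algorithm>
    #include <cctype>
    #include <iostream>
    #include <sstream>
    #include <string>

    // Illustrative stand-in for a case-insensitive prefix test.
    static bool startsWithNoCase(std::string line, std::string tag) {
      std::transform(line.begin(), line.end(), line.begin(), ::tolower);
      std::transform(tag.begin(), tag.end(), tag.begin(), ::tolower);
      return line.compare(0, tag.size(), tag) == 0;
    }

    int main() {
      std::istringstream mdFile("component{ ... }\n## last run using openmd version: 1.0\n");
      const std::string version("## Last run using OpenMD Version: 2.0 Revision: RELEASE");
      std::string mdRawData, line;
      bool foundVersion = false;

      while (std::getline(mdFile, line)) {
        if (startsWithNoCase(line, "## Last run using OpenMD Version")) {
          foundVersion = true;
          mdRawData += version;   // swap in the current stamp
        } else {
          mdRawData += line;      // keep the original line
        }
        mdRawData += "\n";
      }
      if (!foundVersion) mdRawData += version + "\n";

      std::cout << mdRawData;
      return 0;
    }
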
387
388     #ifdef IS_MPI

396     metaDataBlockStart + 1);
397
398     //create the force field
399 <   ForceField * ff = ForceFieldFactory::getInstance()->createForceField(simParams->getForceField());
399 >   ForceField * ff = new ForceField(simParams->getForceField());
400
401     if (ff == NULL) {
402     sprintf(painCave.errMsg,

522     int nTarget;
523     int done;
524     int i;
500 -   int j;
525     int loops;
526     int which_proc;
527     int nProcessors;
529     int nGlobalMols = info->getNGlobalMolecules();
530     std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
531
532 <   MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
532 >   nProcessors = MPI::COMM_WORLD.Get_size();
533
534     if (nProcessors > nGlobalMols) {
535     sprintf(painCave.errMsg,

567     nTarget = (int)(precast + 0.5);
568
569     for(i = 0; i < nGlobalMols; i++) {
570 +
571     done = 0;
572     loops = 0;
573

592     // and be done with it.
593
594     if (loops > 100) {
595 +
596     sprintf(painCave.errMsg,
597 <   "I've tried 100 times to assign molecule %d to a "
598 <   " processor, but can't find a good spot.\n"
599 <   "I'm assigning it at random to processor %d.\n",
597 >   "There have been 100 attempts to assign molecule %d to an\n"
598 >   "\tunderworked processor, but there's no good place to\n"
599 >   "\tleave it. OpenMD is assigning it at random to processor %d.\n",
600     i, which_proc);
601 <
601 >
602     painCave.isFatal = 0;
603 +   painCave.severity = OPENMD_INFO;
604     simError();
605
606     molToProcMap[i] = which_proc;

645     }
646
647     delete myRandom;
648 <
648 >
649     // Spray out this nonsense to all other processors:
650 <
624 <   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
650 >   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
651     } else {
652
653     // Listen to your marching orders from processor 0:
654 <
655 <   MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
654 >   MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
655 >
656     }
657
658     info->setMolToProcMap(molToProcMap);

699     set<AtomType*>::iterator i;
700     bool hasDirectionalAtoms = false;
701     bool hasFixedCharge = false;
702 <   bool hasMultipoles = false;
702 >   bool hasDipoles = false;
703 >   bool hasQuadrupoles = false;
704     bool hasPolarizable = false;
705     bool hasFluctuatingCharge = false;
706     bool hasMetallic = false;

722     if (da.isDirectional()){
723     hasDirectionalAtoms = true;
724     }
725 <   if (ma.isMultipole()){
726 <   hasMultipoles = true;
725 >   if (ma.isDipole()){
726 >   hasDipoles = true;
727     }
728 +   if (ma.isQuadrupole()){
729 +   hasQuadrupoles = true;
730 +   }
731     if (ea.isEAM() || sca.isSuttonChen()){
732     hasMetallic = true;
733     }

751     storageLayout |= DataStorage::dslTorque;
752     }
753     }
754 <   if (hasMultipoles) {
755 <   storageLayout |= DataStorage::dslElectroFrame;
754 >   if (hasDipoles) {
755 >   storageLayout |= DataStorage::dslDipole;
756     }
757 +   if (hasQuadrupoles) {
758 +   storageLayout |= DataStorage::dslQuadrupole;
759 +   }
760     if (hasFixedCharge || hasFluctuatingCharge) {
761     storageLayout |= DataStorage::dslSkippedCharge;
762     }

791     }
792     }
793
794 <   if (simParams->getOutputElectricField()) {
794 >   if (simParams->getOutputElectricField() | simParams->haveElectricField()) {
795     storageLayout |= DataStorage::dslElectricField;
796     }
797 +
798     if (simParams->getOutputFluctuatingCharges()) {
799     storageLayout |= DataStorage::dslFlucQPosition;
800     storageLayout |= DataStorage::dslFlucQVelocity;
801     storageLayout |= DataStorage::dslFlucQForce;
802     }
803
804 +   info->setStorageLayout(storageLayout);
805 +
806     return storageLayout;
807     }
808
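
Note: the storage layout is accumulated as a bitmask, and these hunks split the old hasMultipoles/dslElectroFrame pairing into separate dipole and quadrupole flags, then record the result through info->setStorageLayout(). A minimal sketch of the bit-flag pattern; the flag values below are illustrative, not OpenMD's actual DataStorage constants:

    #include <iostream>

    // Made-up stand-ins for DataStorage::dsl* bit flags.
    enum {
      dslPosition      = 1 << 0,
      dslTorque        = 1 << 1,
      dslDipole        = 1 << 2,
      dslQuadrupole    = 1 << 3,
      dslElectricField = 1 << 4
    };

    int main() {
      bool hasDipoles = true, hasQuadrupoles = false, wantsField = true;

      int storageLayout = dslPosition;
      if (hasDipoles)     storageLayout |= dslDipole;
      if (hasQuadrupoles) storageLayout |= dslQuadrupole;
      if (wantsField)     storageLayout |= dslElectricField;

      // Downstream code tests individual bits to decide which per-particle
      // arrays to allocate.
      if (storageLayout & dslDipole)
        std::cout << "allocate dipole storage" << std::endl;
      if (storageLayout & dslQuadrupole)
        std::cout << "allocate quadrupole storage" << std::endl;
      return 0;
    }
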
820     int beginRigidBodyIndex;
821     int beginCutoffGroupIndex;
822     int nGlobalAtoms = info->getNGlobalAtoms();
823 +   int nGlobalRigidBodies = info->getNGlobalRigidBodies();
824
825     beginAtomIndex = 0;
826 <   beginRigidBodyIndex = 0;
826 >   //rigidbody's index begins right after atom's
827 >   beginRigidBodyIndex = info->getNGlobalAtoms();
828     beginCutoffGroupIndex = 0;
829
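
Note: after this hunk, rigid bodies get global indices that start right where the atom indices end. A worked micro-example with made-up counts: with 1000 atoms and 20 rigid bodies, atoms carry global indices 0 through 999 and rigid bodies carry 1000 through 1019, which is why the membership tables later in this patch are sized to nGlobalAtoms + nGlobalRigidBodies instead of nGlobalAtoms alone.
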
830     for(int i = 0; i < info->getNGlobalMolecules(); i++) {

887     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
888     // docs said we could.
889     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
890 <   MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
891 <   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
890 >   MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
891 >   &tmpGroupMembership[0], nGlobalAtoms,
892 >   MPI::INT, MPI::SUM);
893     info->setGlobalGroupMembership(tmpGroupMembership);
894     #else
895     info->setGlobalGroupMembership(globalGroupMembership);
896     #endif
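
Note: in this pattern each processor fills in only the entries for the atoms it owns and leaves the rest at zero, so an element-wise sum across all ranks reassembles the complete membership table everywhere; MPI_IN_PLACE would avoid the temporary vector, as the comment above laments. A minimal standalone sketch of that merge, using the same C++ bindings as the hunk (values are illustrative):

    #include <mpi.h>
    #include <iostream>
    #include <vector>

    int main(int argc, char** argv) {
      MPI::Init(argc, argv);
      int rank = MPI::COMM_WORLD.Get_rank();
      int size = MPI::COMM_WORLD.Get_size();

      // Each rank knows the membership only for "its" atoms; other slots stay 0.
      const int nGlobalAtoms = 8;
      std::vector<int> local(nGlobalAtoms, 0), merged(nGlobalAtoms, 0);
      for (int i = rank; i < nGlobalAtoms; i += size)
        local[i] = 100 + i;                       // stand-in membership value

      // Element-wise sum across ranks fills every slot on every rank.
      MPI::COMM_WORLD.Allreduce(&local[0], &merged[0], nGlobalAtoms,
                                MPI::INT, MPI::SUM);

      if (rank == 0)
        for (int i = 0; i < nGlobalAtoms; i++)
          std::cout << "atom " << i << " -> " << merged[i] << std::endl;
      MPI::Finalize();
      return 0;
    }
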
897
898     //fill molMembership
899 <   std::vector<int> globalMolMembership(info->getNGlobalAtoms(), 0);
899 >   std::vector<int> globalMolMembership(info->getNGlobalAtoms() +
900 >   info->getNGlobalRigidBodies(), 0);
901
902 <   for(mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
902 >   for(mol = info->beginMolecule(mi); mol != NULL;
903 >   mol = info->nextMolecule(mi)) {
904     for(atom = mol->beginAtom(ai); atom != NULL; atom = mol->nextAtom(ai)) {
905     globalMolMembership[atom->getGlobalIndex()] = mol->getGlobalIndex();
906     }
907 +   for (rb = mol->beginRigidBody(ri); rb != NULL;
908 +   rb = mol->nextRigidBody(ri)) {
909 +   globalMolMembership[rb->getGlobalIndex()] = mol->getGlobalIndex();
910 +   }
911     }
912
913     #ifdef IS_MPI
914 <   std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
914 >   std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
915 >   info->getNGlobalRigidBodies(), 0);
916 >   MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
917 >   nGlobalAtoms + nGlobalRigidBodies,
918 >   MPI::INT, MPI::SUM);
919
871 -   MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
872 -   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
873 -
920     info->setGlobalMolMembership(tmpMolMembership);
921     #else
922     info->setGlobalMolMembership(globalMolMembership);

926     // here the molecules are listed by their global indices.
927
928     std::vector<int> nIOPerMol(info->getNGlobalMolecules(), 0);
929 <   for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
929 >   for (mol = info->beginMolecule(mi); mol != NULL;
930 >   mol = info->nextMolecule(mi)) {
931     nIOPerMol[mol->getGlobalIndex()] = mol->getNIntegrableObjects();
932     }
933
934     #ifdef IS_MPI
935     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
936 <   MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
937 <   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
936 >   MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
937 >   info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
938     #else
939     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
940     #endif

948     }
949
950     std::vector<StuntDouble*> IOIndexToIntegrableObject(info->getNGlobalIntegrableObjects(), (StuntDouble*)NULL);
951 <   for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
951 >   for (mol = info->beginMolecule(mi); mol != NULL;
952 >   mol = info->nextMolecule(mi)) {
953     int myGlobalIndex = mol->getGlobalIndex();
954     int globalIO = startingIOIndexForMol[myGlobalIndex];
955 <   for (StuntDouble* integrableObject = mol->beginIntegrableObject(ioi); integrableObject != NULL;
956 <   integrableObject = mol->nextIntegrableObject(ioi)) {
957 <   integrableObject->setGlobalIntegrableObjectIndex(globalIO);
958 <   IOIndexToIntegrableObject[globalIO] = integrableObject;
955 >   for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
956 >   sd = mol->nextIntegrableObject(ioi)) {
957 >   sd->setGlobalIntegrableObjectIndex(globalIO);
958 >   IOIndexToIntegrableObject[globalIO] = sd;
959     globalIO++;
960     }
961     }

965     }
966
967     void SimCreator::loadCoordinates(SimInfo* info, const std::string& mdFileName) {
920 -   Globals* simParams;
921 -
922 -   simParams = info->getSimParams();
968
969     DumpReader reader(info, mdFileName);
970     int nframes = reader.getNFrames();
971 <
971 >
972     if (nframes > 0) {
973     reader.readFrame(nframes - 1);
974     } else {