 56    #include "brains/SimCreator.hpp"
 57    #include "brains/SimSnapshotManager.hpp"
 58    #include "io/DumpReader.hpp"
 59 <  #include "UseTheForce/ForceFieldFactory.hpp"
 59 >  #include "brains/ForceField.hpp"
 60    #include "utils/simError.h"
 61    #include "utils/StringUtils.hpp"
 62    #include "math/SeqRandNumGen.hpp"

256    int metaDataBlockStart = -1;
257    int metaDataBlockEnd = -1;
258    int i;
259 <  int mdOffset;
259 >  streamoff mdOffset;
260    int mdFileVersion;
261
262 +
263    #ifdef IS_MPI
264    const int masterNode = 0;
265    if (worldRank == masterNode) {
266    #endif
267
268 <  std::ifstream mdFile_(mdFileName.c_str());
268 >  std::ifstream mdFile_;
269 >  mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary);
270
271    if (mdFile_.fail()) {
272    sprintf(painCave.errMsg,

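This hunk widens `mdOffset` from `int` to `streamoff` and opens the meta-data file in binary mode. A `streamoff` can represent positions in files larger than 2 GB, and binary mode keeps `tellg()`/`seekg()` offsets byte-accurate on platforms that would otherwise translate line endings. A minimal sketch of the pattern (the file name is a placeholder, not OpenMD code):

```cpp
// Why stream offsets belong in std::streamoff and why the file is opened
// in binary mode: a 32-bit int cannot hold positions in files past ~2 GB,
// and text-mode CRLF translation would make tellg()/seekg() offsets
// disagree with byte counts.
#include <fstream>
#include <iostream>
#include <string>

int main() {
  std::ifstream in;
  in.open("sample.md", std::ifstream::in | std::ifstream::binary);
  if (in.fail()) {
    std::cerr << "could not open sample.md\n";
    return 1;
  }
  std::string line;
  std::getline(in, line);              // consume one line
  std::streamoff offset = in.tellg();  // byte-accurate in binary mode
  in.seekg(offset);                    // safe round-trip
  std::cout << "resuming at byte " << offset << "\n";
  return 0;
}
```
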
373    metaDataBlockStart + 1);
374
375    //create the force field
376 <  ForceField * ff = ForceFieldFactory::getInstance()->createForceField(simParams->getForceField());
376 >  ForceField * ff = new ForceField(simParams->getForceField());
377
378    if (ff == NULL) {
379    sprintf(painCave.errMsg,

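Together with the include swap in the first hunk, this removes the `ForceFieldFactory` singleton: the force field is now constructed directly from the name given in the meta-data file. A sketch of the refactoring pattern with a hypothetical stand-in class, not the real OpenMD API:

```cpp
// A singleton factory that dispatched on a name string is replaced by a
// single concrete class constructed directly from that name.
#include <iostream>
#include <string>

class ForceField {  // hypothetical stand-in for brains/ForceField.hpp
public:
  explicit ForceField(const std::string& name) : name_(name) {
    // a real implementation would locate and parse the force-field file here
  }
  const std::string& getName() const { return name_; }
private:
  std::string name_;
};

int main() {
  // before: ForceField* ff = ForceFieldFactory::getInstance()->createForceField("DUFF");
  // after:  one concrete class, no factory indirection
  ForceField* ff = new ForceField("DUFF");
  std::cout << "loaded force field: " << ff->getName() << "\n";
  delete ff;
  return 0;
}
```
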
428
429    int storageLayout = computeStorageLayout(info);
430
429 -  cerr << "computed Storage Layout = " << storageLayout << "\n";
430 -
431    //allocate memory for DataStorage(circular reference, need to
432    //break it)
433    info->setSnapshotManager(new SimSnapshotManager(info, storageLayout));

507    int nGlobalMols = info->getNGlobalMolecules();
508    std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition:
509
510 <  MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
510 >  nProcessors = MPI::COMM_WORLD.Get_size();
511
512    if (nProcessors > nGlobalMols) {
513    sprintf(painCave.errMsg,

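This and the following MPI hunks translate the C bindings into the C++ bindings. (The `MPI::` bindings were later deprecated in MPI-2.2 and removed in MPI-3.0, so this direction has since been reversed in modern codebases.) The two calls below are equivalent; the sketch assumes an MPI implementation that still ships `MPI::`:

```cpp
// Equivalent ways to ask for the communicator size.
#include <mpi.h>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  int nProcessors = MPI::COMM_WORLD.Get_size();  // C++ binding (this diff)
  int nProcsC;
  MPI_Comm_size(MPI_COMM_WORLD, &nProcsC);       // C binding (old code)
  // both report the same value: nProcessors == nProcsC
  MPI::Finalize();
  return 0;
}
```
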
622    delete myRandom;
623
624    // Spray out this nonsense to all other processors:
625 <
626 <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
625 >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
626    } else {
627
628    // Listen to your marching orders from processor 0:
629 <
631 <  MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
629 >  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
630    }
631
632    info->setMolToProcMap(molToProcMap);

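Same translation for the broadcast: rank 0 (the `masterNode`) decides the molecule-to-processor layout and every other rank receives a copy of it. A self-contained sketch of that pattern, with placeholder sizes:

```cpp
// Rank 0 fills the map; the broadcast makes every rank's copy identical.
#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
  MPI::Init(argc, argv);
  int nGlobalMols = 8;  // placeholder count
  std::vector<int> molToProcMap(nGlobalMols, -1);
  if (MPI::COMM_WORLD.Get_rank() == 0) {
    for (int i = 0; i < nGlobalMols; ++i)  // rank 0 decides the layout
      molToProcMap[i] = i % MPI::COMM_WORLD.Get_size();
  }
  // old C binding: MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
  MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
  MPI::Finalize();
  return 0;
}
```
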
673    set<AtomType*>::iterator i;
674    bool hasDirectionalAtoms = false;
675    bool hasFixedCharge = false;
676 <  bool hasMultipoles = false;
676 >  bool hasDipoles = false;
677 >  bool hasQuadrupoles = false;
678    bool hasPolarizable = false;
679    bool hasFluctuatingCharge = false;
680    bool hasMetallic = false;

696    if (da.isDirectional()){
697    hasDirectionalAtoms = true;
698    }
699 <  if (ma.isMultipole()){
700 <  hasMultipoles = true;
699 >  if (ma.isDipole()){
700 >  hasDipoles = true;
701    }
702 +  if (ma.isQuadrupole()){
703 +  hasQuadrupoles = true;
704 +  }
705    if (ea.isEAM() || sca.isSuttonChen()){
706    hasMetallic = true;
707    }

725    storageLayout |= DataStorage::dslTorque;
726    }
727    }
728 <  if (hasMultipoles) {
729 <  storageLayout |= DataStorage::dslElectroFrame;
728 >  if (hasDipoles) {
729 >  storageLayout |= DataStorage::dslDipole;
730    }
731 +  if (hasQuadrupoles) {
732 +  storageLayout |= DataStorage::dslQuadrupole;
733 +  }
734    if (hasFixedCharge || hasFluctuatingCharge) {
735    storageLayout |= DataStorage::dslSkippedCharge;
736    }

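These three hunks split the old combined multipole handling into separate dipole and quadrupole detection, and give each its own storage bit (`dslDipole`, `dslQuadrupole`) in place of the combined `dslElectroFrame`. A minimal sketch of the bitmask idea; the enum values here are hypothetical stand-ins for the `DataStorage::dsl*` constants:

```cpp
// Each property the atom types need gets one bit; bits are OR-ed together
// and tested later to decide which per-atom arrays to allocate.
#include <iostream>

enum StorageBits {
  dslTorque     = 1 << 0,
  dslDipole     = 1 << 1,  // replaces the old combined dslElectroFrame
  dslQuadrupole = 1 << 2
};

int main() {
  bool hasDipoles = true, hasQuadrupoles = false;
  int storageLayout = 0;
  if (hasDipoles)     storageLayout |= dslDipole;
  if (hasQuadrupoles) storageLayout |= dslQuadrupole;
  // test a bit the same way a snapshot manager would
  if (storageLayout & dslDipole)
    std::cout << "allocating per-atom dipole storage\n";
  return 0;
}
```
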
758    if (simParams->getOutputParticlePotential()) {
759    storageLayout |= DataStorage::dslParticlePot;
760    }
761 +
762 +  if (simParams->havePrintHeatFlux()) {
763 +  if (simParams->getPrintHeatFlux()) {
764 +  storageLayout |= DataStorage::dslParticlePot;
765 +  }
766 +  }
767 +
768    if (simParams->getOutputElectricField()) {
769    storageLayout |= DataStorage::dslElectricField;
770    }
771 +
772    if (simParams->getOutputFluctuatingCharges()) {
773    storageLayout |= DataStorage::dslFlucQPosition;
774    storageLayout |= DataStorage::dslFlucQVelocity;

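The new heat-flux block follows the have/get convention for optional meta-data keywords: `havePrintHeatFlux()` reports whether the keyword was set at all, and only then is `getPrintHeatFlux()` read; note that heat flux reuses the `dslParticlePot` storage bit rather than adding its own. A sketch of that accessor pattern with hypothetical stand-in names, not the generated simParams accessors:

```cpp
// have* answers "was the keyword present?"; get* returns its value, which
// is only meaningful when have* is true.
#include <iostream>

class Params {
public:
  bool havePrintHeatFlux() const { return haveHeatFlux_; }
  bool getPrintHeatFlux() const { return heatFlux_; }
  void setPrintHeatFlux(bool v) { heatFlux_ = v; haveHeatFlux_ = true; }
private:
  bool haveHeatFlux_ = false;  // was the keyword present?
  bool heatFlux_ = false;      // its value, valid only if present
};

int main() {
  Params simParams;
  int storageLayout = 0;
  const int dslParticlePot = 1 << 0;    // placeholder bit
  if (simParams.havePrintHeatFlux() &&  // guard before reading the value
      simParams.getPrintHeatFlux())
    storageLayout |= dslParticlePot;    // heat flux reuses particle-potential storage
  std::cout << storageLayout << "\n";
  return 0;
}
```
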
794    int nGlobalAtoms = info->getNGlobalAtoms();
795
796    beginAtomIndex = 0;
797 <  beginRigidBodyIndex = 0;
797 >  //rigidbody's index begins right after atom's
798 >  beginRigidBodyIndex = info->getNGlobalAtoms();
799    beginCutoffGroupIndex = 0;
800
801    for(int i = 0; i < info->getNGlobalMolecules(); i++) {

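The fix in this hunk is that rigid bodies share one global index sequence with atoms, so their indices must start at `nGlobalAtoms` rather than at 0 or the two ranges would collide; cutoff groups keep a sequence of their own. Schematically, with placeholder counts:

```cpp
// Atoms and rigid bodies draw from one shared index space.
#include <iostream>

int main() {
  int nGlobalAtoms          = 100;
  int beginAtomIndex        = 0;             // atoms:        [0, nGlobalAtoms)
  int beginRigidBodyIndex   = nGlobalAtoms;  // rigid bodies: [nGlobalAtoms, ...)
  int beginCutoffGroupIndex = 0;             // cutoff groups: separate sequence
  std::cout << beginAtomIndex << " " << beginRigidBodyIndex << " "
            << beginCutoffGroupIndex << "\n";
  return 0;
}
```
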
858    // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
859    // docs said we could.
860    std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
861 <  MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms,
862 <  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
861 >  MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
862 >  &tmpGroupMembership[0], nGlobalAtoms,
863 >  MPI::INT, MPI::SUM);
864    info->setGlobalGroupMembership(tmpGroupMembership);
865    #else
866    info->setGlobalGroupMembership(globalGroupMembership);

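The surviving comment wishes for `MPI_IN_PLACE`, which is indeed the cleaner MPI-2 way to do this reduction: it removes the scratch vector entirely, since each rank's contribution is summed back into the same buffer. A sketch using the C binding, where `MPI_IN_PLACE` is reliably available:

```cpp
// In-place Allreduce: no tmpGroupMembership scratch vector needed.
#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int nGlobalAtoms = 16;  // placeholder count
  std::vector<int> groupMembership(nGlobalAtoms, 0);
  // ... each rank fills in the entries for the atoms it owns ...
  MPI_Allreduce(MPI_IN_PLACE, &groupMembership[0], nGlobalAtoms,
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
  MPI_Finalize();
  return 0;
}
```
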
877
878    #ifdef IS_MPI
879    std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0);
880 +  MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
881 +  nGlobalAtoms,
882 +  MPI::INT, MPI::SUM);
883
866 -  MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms,
867 -  MPI_INT, MPI_SUM, MPI_COMM_WORLD);
868 -
884    info->setGlobalMolMembership(tmpMolMembership);
885    #else
886    info->setGlobalMolMembership(globalMolMembership);

896
897    #ifdef IS_MPI
898    std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
899 <  MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
900 <  info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
899 >  MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
900 >  info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
901    #else
902    std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
903    #endif

914    for (mol = info->beginMolecule(mi); mol != NULL; mol = info->nextMolecule(mi)) {
915    int myGlobalIndex = mol->getGlobalIndex();
916    int globalIO = startingIOIndexForMol[myGlobalIndex];
917 <  for (StuntDouble* integrableObject = mol->beginIntegrableObject(ioi); integrableObject != NULL;
918 <  integrableObject = mol->nextIntegrableObject(ioi)) {
919 <  integrableObject->setGlobalIntegrableObjectIndex(globalIO);
920 <  IOIndexToIntegrableObject[globalIO] = integrableObject;
917 >  for (StuntDouble* sd = mol->beginIntegrableObject(ioi); sd != NULL;
918 >  sd = mol->nextIntegrableObject(ioi)) {
919 >  sd->setGlobalIntegrableObjectIndex(globalIO);
920 >  IOIndexToIntegrableObject[globalIO] = sd;
921    globalIO++;
922    }
923    }

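This last hunk only renames the loop variable to `sd`, but the surrounding logic deserves a note: `startingIOIndexForMol` behaves like an exclusive prefix sum over the per-molecule integrable-object counts, so every `StuntDouble` gets a unique, contiguous global index regardless of which processor owns its molecule. A self-contained sketch with placeholder counts; plain vectors stand in for the SimInfo bookkeeping:

```cpp
// Exclusive prefix sum over per-molecule counts, then contiguous
// assignment of global integrable-object indices.
#include <iostream>
#include <vector>

int main() {
  std::vector<int> numIOPerMol = {2, 1, 3};  // placeholder counts
  std::vector<int> startingIOIndexForMol(numIOPerMol.size(), 0);
  for (size_t m = 1; m < numIOPerMol.size(); ++m)  // exclusive prefix sum
    startingIOIndexForMol[m] =
        startingIOIndexForMol[m - 1] + numIOPerMol[m - 1];
  for (size_t m = 0; m < numIOPerMol.size(); ++m) {
    int globalIO = startingIOIndexForMol[m];
    for (int k = 0; k < numIOPerMol[m]; ++k)
      std::cout << "mol " << m << " object " << k
                << " -> global index " << globalIO++ << "\n";
  }
  return 0;
}
```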