| 259 |
|
streamoff mdOffset; |
| 260 |
|
int mdFileVersion; |
| 261 |
|
|
| 262 |
+ |
|
| 263 |
|
#ifdef IS_MPI |
| 264 |
|
const int masterNode = 0; |
| 265 |
|
if (worldRank == masterNode) { |
| 266 |
|
#endif |
| 267 |
|
|
| 268 |
< |
std::ifstream mdFile_(mdFileName.c_str()); |
| 268 |
> |
std::ifstream mdFile_; |
| 269 |
> |
mdFile_.open(mdFileName.c_str(), ifstream::in | ifstream::binary); |
| 270 |
|
|
| 271 |
|
if (mdFile_.fail()) { |
| 272 |
|
sprintf(painCave.errMsg, |
| 507 |
|
int nGlobalMols = info->getNGlobalMolecules(); |
| 508 |
|
std::vector<int> molToProcMap(nGlobalMols, -1); // default to an error condition: |
| 509 |
|
|
| 510 |
< |
MPI_Comm_size(MPI_COMM_WORLD, &nProcessors); |
| 510 |
> |
nProcessors = MPI::COMM_WORLD.Get_size(); |
| 511 |
|
|
| 512 |
|
if (nProcessors > nGlobalMols) { |
| 513 |
|
sprintf(painCave.errMsg, |
| 622 |
|
delete myRandom; |
| 623 |
|
|
| 624 |
|
// Spray out this nonsense to all other processors: |
| 625 |
< |
|
| 624 |
< |
MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 625 |
> |
MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 626 |
|
} else { |
| 627 |
|
|
| 628 |
|
// Listen to your marching orders from processor 0: |
| 629 |
< |
|
| 629 |
< |
MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 629 |
> |
MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 630 |
|
} |
| 631 |
|
|
| 632 |
|
info->setMolToProcMap(molToProcMap); |
| 673 |
|
set<AtomType*>::iterator i; |
| 674 |
|
bool hasDirectionalAtoms = false; |
| 675 |
|
bool hasFixedCharge = false; |
| 676 |
< |
bool hasMultipoles = false; |
| 676 |
> |
bool hasDipoles = false; |
| 677 |
> |
bool hasQuadrupoles = false; |
| 678 |
|
bool hasPolarizable = false; |
| 679 |
|
bool hasFluctuatingCharge = false; |
| 680 |
|
bool hasMetallic = false; |
| 696 |
|
if (da.isDirectional()){ |
| 697 |
|
hasDirectionalAtoms = true; |
| 698 |
|
} |
| 699 |
< |
if (ma.isMultipole()){ |
| 700 |
< |
hasMultipoles = true; |
| 699 |
> |
if (ma.isDipole()){ |
| 700 |
> |
hasDipoles = true; |
| 701 |
|
} |
| 702 |
+ |
if (ma.isQuadrupole()){ |
| 703 |
+ |
hasQuadrupoles = true; |
| 704 |
+ |
} |
| 705 |
|
if (ea.isEAM() || sca.isSuttonChen()){ |
| 706 |
|
hasMetallic = true; |
| 707 |
|
} |
| 725 |
|
storageLayout |= DataStorage::dslTorque; |
| 726 |
|
} |
| 727 |
|
} |
| 728 |
+ |
if (hasDipoles) { |
| 729 |
+ |
storageLayout |= DataStorage::dslDipole; |
| 730 |
+ |
} |
| 731 |
+ |
if (hasQuadrupoles) { |
| 732 |
+ |
storageLayout |= DataStorage::dslQuadrupole; |
| 733 |
+ |
} |
| 734 |
|
if (hasFixedCharge || hasFluctuatingCharge) { |
| 735 |
|
storageLayout |= DataStorage::dslSkippedCharge; |
| 736 |
|
} |
| 858 |
|
// This would be prettier if we could use MPI_IN_PLACE like the MPI-2 |
| 859 |
|
// docs said we could. |
| 860 |
|
std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0); |
| 861 |
< |
MPI_Allreduce(&globalGroupMembership[0], &tmpGroupMembership[0], nGlobalAtoms, |
| 862 |
< |
MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 861 |
> |
MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], |
| 862 |
> |
&tmpGroupMembership[0], nGlobalAtoms, |
| 863 |
> |
MPI::INT, MPI::SUM); |
| 864 |
|
info->setGlobalGroupMembership(tmpGroupMembership); |
| 865 |
|
#else |
| 866 |
|
info->setGlobalGroupMembership(globalGroupMembership); |
| 877 |
|
|
| 878 |
|
#ifdef IS_MPI |
| 879 |
|
std::vector<int> tmpMolMembership(info->getNGlobalAtoms(), 0); |
| 880 |
+ |
MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
| 881 |
+ |
nGlobalAtoms, |
| 882 |
+ |
MPI::INT, MPI::SUM); |
| 883 |
|
|
| 870 |
-
MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], nGlobalAtoms, |
| 871 |
-
MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 872 |
-
|
| 884 |
|
info->setGlobalMolMembership(tmpMolMembership); |
| 885 |
|
#else |
| 886 |
|
info->setGlobalMolMembership(globalMolMembership); |
| 896 |
|
|
| 897 |
|
#ifdef IS_MPI |
| 898 |
|
std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); |
| 899 |
< |
MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 900 |
< |
info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 899 |
> |
MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 900 |
> |
info->getNGlobalMolecules(), MPI::INT, MPI::SUM); |
| 901 |
|
#else |
| 902 |
|
std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; |
| 903 |
|
#endif |