|  64 |   | #include "brains/ForceField.hpp" |
|  65 |   | #include "utils/simError.h" |
|  66 |   | #include "utils/StringUtils.hpp" |
|  67 | + | #include "utils/Revision.hpp" |
|  68 |   | #include "math/SeqRandNumGen.hpp" |
|  69 |   | #include "mdParser/MDLexer.hpp" |
|  70 |   | #include "mdParser/MDParser.hpp" |
| 104 |   |       const int masterNode = 0; |
| 105 |   |  |
| 106 |   |       if (worldRank == masterNode) { |
| 107 | < |         MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
| 107 | > |         MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 108 |   | #endif |
| 109 |   |         SimplePreprocessor preprocessor; |
| 110 | < |         preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream); |
| 110 | > |         preprocessor.preprocess(rawMetaDataStream, filename, |
| 111 | > |                                 startOfMetaDataBlock, ppStream); |
| 112 |   |  |
| 113 |   | #ifdef IS_MPI |
| 114 | < |         //brocasting the stream size |
| 114 | > |         //broadcasting the stream size |
| 115 |   |         streamSize = ppStream.str().size() +1; |
| 116 | < |         MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
| 117 | < |         MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode); |
| 118 | < |  |
| 116 | > |         MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 117 | > |         MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), |
| 118 | > |                   streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
| 119 |   |       } else { |
| 118 | - |         MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode); |
| 120 |   |  |
| 121 | < |         //get stream size |
| 121 | < |         MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode); |
| 121 | > |         MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 122 |   |  |
| 123 | + |         //get stream size |
| 124 | + |         MPI_Bcast(&streamSize, 1, MPI_INT, masterNode, MPI_COMM_WORLD); |
| 125 |   |         char* buf = new char[streamSize]; |
| 126 |   |         assert(buf); |
| 127 |   |  |
| 128 |   |         //receive file content |
| 129 | < |         MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode); |
| 130 | < |  |
| 129 | > |         MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD); |
| 130 | > |  |
| 131 |   |         ppStream.str(buf); |
| 132 |   |         delete [] buf; |
| 133 |   |       } |
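The hunk above is the core of this patch: the MPI-2 C++ bindings (`MPI::COMM_WORLD.Bcast(...)`) were deprecated in MPI-2.2 and removed in MPI-3.0, so each call becomes the C binding with the communicator as a trailing argument. Note that the datatype must now match the variable's actual C type, which is why the `streamSize` broadcast changes from `MPI::LONG` to `MPI_INT`. A minimal sketch of the size-then-payload pattern used here, with hypothetical names (`broadcastString`, `root`) and assuming MPI has already been initialized:

```cpp
#include <mpi.h>
#include <string>
#include <vector>

// Broadcast a string from `root` to every rank: first the length
// (including the terminating NUL), then the characters themselves.
std::string broadcastString(const std::string& src, int root) {
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // Size goes first, as an int to match MPI_INT.
  int size = (rank == root) ? static_cast<int>(src.size()) + 1 : 0;
  MPI_Bcast(&size, 1, MPI_INT, root, MPI_COMM_WORLD);

  // MPI_Bcast is symmetric: root and non-root ranks issue the same call,
  // and the `root` argument decides who sends and who receives.
  std::vector<char> buf(size);               // zero-filled, so NUL-terminated
  if (rank == root) src.copy(buf.data(), src.size());
  MPI_Bcast(buf.data(), size, MPI_CHAR, root, MPI_COMM_WORLD);

  return std::string(buf.data());
}
```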
| 151 |   |       parser.initializeASTFactory(factory); |
| 152 |   |       parser.setASTFactory(&factory); |
| 153 |   |       parser.mdfile(); |
| 152 | - |  |
| 154 |   |       // Create a tree parser that reads information into Globals |
| 155 |   |       MDTreeParser treeParser; |
| 156 |   |       treeParser.initializeASTFactory(factory); |
| 264 |   |     version.append("."); |
| 265 |   |     version.append(OPENMD_VERSION_MINOR); |
| 266 |   |  |
| 267 | < |     std::string svnrev; |
| 267 | > |     std::string svnrev(g_REVISION, strnlen(g_REVISION, 20)); |
| 268 |   |     //convert a macro from compiler to a string in c++ |
| 269 | < |     STR_DEFINE(svnrev, SVN_REV ); |
| 269 | > |     // STR_DEFINE(svnrev, SVN_REV ); |
| 270 |   |     version.append(" Revision: "); |
| 271 |   |     // If there's no SVN revision, just call this the RELEASE revision. |
| 272 |   |     if (!svnrev.empty()) { |
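Here the compile-time `SVN_REV` macro (stringified via `STR_DEFINE`) is replaced by a `g_REVISION` symbol pulled in through the new `utils/Revision.hpp` include; `strnlen(g_REVISION, 20)` caps the copy so an unterminated or overlong revision string cannot overrun. The contents of `Revision.hpp` are not shown in this diff, but a build-generated pair of this shape would fit; the value below is made up:

```cpp
// Revision.hpp -- declaration only; the definition lives in a source
// file the build system regenerates whenever the revision changes.
extern const char g_REVISION[];

// Revision.cpp (generated at build time; hypothetical value)
const char g_REVISION[] = "r1234";
```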
| 521 |   |                                                     // error |
| 522 |   |                                                     // condition: |
| 523 |   |  |
| 524 | < |     nProcessors = MPI::COMM_WORLD.Get_size(); |
| 524 | > |     MPI_Comm_size( MPI_COMM_WORLD, &nProcessors); |
| 525 |   |  |
| 526 |   |     if (nProcessors > nGlobalMols) { |
| 527 |   |       sprintf(painCave.errMsg, |
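`Get_size` → `MPI_Comm_size` shows the other half of the mechanical translation: C++ member functions that returned values become C functions with out-parameters. A compact reference for the mapping this patch applies throughout (the `worldSize` helper is hypothetical):

```cpp
#include <mpi.h>

// C++ binding (removed in MPI-3.0)        ->  C binding used in this patch
//   MPI::COMM_WORLD.Get_size()            ->  MPI_Comm_size(MPI_COMM_WORLD, &n)
//   MPI::COMM_WORLD.Get_rank()            ->  MPI_Comm_rank(MPI_COMM_WORLD, &r)
//   MPI::COMM_WORLD.Bcast(b, n, T, root)  ->  MPI_Bcast(b, n, T, root, MPI_COMM_WORLD)
//   MPI::COMM_WORLD.Allreduce(s, d, n, T, op)
//                                         ->  MPI_Allreduce(s, d, n, T, op, MPI_COMM_WORLD)
int worldSize() {
  int n = 0;
  MPI_Comm_size(MPI_COMM_WORLD, &n);  // error code returned by the call ignored here
  return n;
}
```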
| 640 |   |       delete myRandom; |
| 641 |   |  |
| 642 |   |       // Spray out this nonsense to all other processors: |
| 643 | < |       MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 643 | > |       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 644 | > |  |
| 645 |   |     } else { |
| 646 |   |  |
| 647 |   |       // Listen to your marching orders from processor 0: |
| 648 | < |       MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0); |
| 648 | > |       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD); |
| 649 |   |  |
| 650 |   |     } |
| 651 |   |  |
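Both branches issue the identical broadcast, which is correct: `MPI_Bcast` is a collective whose `root` argument (here rank 0) decides who sends, so the call could even be hoisted out of the `if`/`else`. A small sketch of that equivalent form, using the surrounding variables as stand-ins:

```cpp
#include <mpi.h>
#include <vector>

// Equivalent single-call form: rank 0's molToProcMap contents are
// distributed, and every other rank's vector is overwritten.
void shareMolToProcMap(std::vector<int>& molToProcMap, int nGlobalMols) {
  molToProcMap.resize(nGlobalMols);  // receivers need room before the call
  MPI_Bcast(molToProcMap.data(), nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
}
```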
| 919 |   |     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2 |
| 920 |   |     // docs said we could. |
| 921 |   |     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0); |
| 922 | < |     MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0], |
| 923 | < |                               &tmpGroupMembership[0], nGlobalAtoms, |
| 924 | < |                               MPI::INT, MPI::SUM); |
| 922 | > |     MPI_Allreduce(&globalGroupMembership[0], |
| 923 | > |                   &tmpGroupMembership[0], nGlobalAtoms, |
| 924 | > |                   MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 925 | > |  |
| 926 |   |     info->setGlobalGroupMembership(tmpGroupMembership); |
| 927 |   | #else |
| 928 |   |     info->setGlobalGroupMembership(globalGroupMembership); |
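The comment above wishes for `MPI_IN_PLACE`, and with the C bindings it does work: every rank passes `MPI_IN_PLACE` as the send buffer and the receive buffer supplies both input and output, which would eliminate the `tmpGroupMembership` scratch vector. A sketch of that alternative (not what this patch does, just the form the comment refers to):

```cpp
#include <mpi.h>
#include <vector>

// In-place element-wise sum across all ranks; the vector is both the
// local contribution and the result, so no temporary copy is needed.
void sumMembershipInPlace(std::vector<int>& membership) {
  MPI_Allreduce(MPI_IN_PLACE, membership.data(),
                static_cast<int>(membership.size()),
                MPI_INT, MPI_SUM, MPI_COMM_WORLD);
}
```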
| 946 |   | #ifdef IS_MPI |
| 947 |   |     std::vector<int> tmpMolMembership(info->getNGlobalAtoms() + |
| 948 |   |                                       info->getNGlobalRigidBodies(), 0); |
| 949 | < |     MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
| 950 | < |                               nGlobalAtoms + nGlobalRigidBodies, |
| 951 | < |                               MPI::INT, MPI::SUM); |
| 949 | > |     MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0], |
| 950 | > |                   nGlobalAtoms + nGlobalRigidBodies, |
| 951 | > |                   MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 952 |   |  |
| 953 |   |     info->setGlobalMolMembership(tmpMolMembership); |
| 954 |   | #else |
| 966 |   |  |
| 967 |   | #ifdef IS_MPI |
| 968 |   |     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0); |
| 969 | < |     MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 970 | < |                               info->getNGlobalMolecules(), MPI::INT, MPI::SUM); |
| 969 | > |     MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0], |
| 970 | > |                   info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD); |
| 971 |   | #else |
| 972 |   |     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol; |
| 973 |   | #endif |