| 103 |   |       const int masterNode = 0;
| 104 |   |
| 105 |   |       if (worldRank == masterNode) {
| 106 | < |         MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 106 | > |         MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 107 | > |         // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 108 |   | #endif
| 109 |   |         SimplePreprocessor preprocessor;
| 110 | < |         preprocessor.preprocess(rawMetaDataStream, filename, startOfMetaDataBlock, ppStream);
| 110 | > |         preprocessor.preprocess(rawMetaDataStream, filename,
| 111 | > |                                 startOfMetaDataBlock, ppStream);
| 112 |   |
| 113 |   | #ifdef IS_MPI
| 114 | < |         //brocasting the stream size
| 114 | > |         //broadcasting the stream size
| 115 |   |         streamSize = ppStream.str().size() +1;
| 116 | < |         MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 117 | < |         MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())), streamSize, MPI::CHAR, masterNode);
| 116 | > |         MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
| 117 | > |         MPI_Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
| 118 | > |                   streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 119 | > |
| 120 | > |         // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 121 | > |         // MPI::COMM_WORLD.Bcast(static_cast<void*>(const_cast<char*>(ppStream.str().c_str())),
| 122 | > |         //                       streamSize, MPI::CHAR, masterNode);
| 123 |   |
| 124 |   |       } else {
| 118 | – |         MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 125 |   |
| 126 | < |         //get stream size
| 127 | < |         MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 126 | > |         MPI_Bcast(&mdFileVersion, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
| 127 | > |         // MPI::COMM_WORLD.Bcast(&mdFileVersion, 1, MPI::INT, masterNode);
| 128 |   |
| 129 | + |         //get stream size
| 130 | + |         MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
| 131 | + |         // MPI::COMM_WORLD.Bcast(&streamSize, 1, MPI::LONG, masterNode);
| 132 |   |         char* buf = new char[streamSize];
| 133 |   |         assert(buf);
| 134 |   |
| 135 |   |         //receive file content
| 136 | < |         MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
| 137 | < |
| 136 | > |         MPI_Bcast(buf, streamSize, MPI_CHAR, masterNode, MPI_COMM_WORLD);
| 137 | > |         // MPI::COMM_WORLD.Bcast(buf, streamSize, MPI::CHAR, masterNode);
| 138 | > |
| 139 |   |         ppStream.str(buf);
| 140 |   |         delete [] buf;
| 141 |   |       }
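For readers tracking the conversion, the pattern this hunk settles on — broadcast the buffer length first, then the characters, with every rank issuing the same collective calls — can be written against the C API roughly as in the sketch below. This is a minimal illustration under stated assumptions, not code from the patch; the function name bcastMetaData and the std::vector buffer are hypothetical choices.

    #include <mpi.h>
    #include <string>
    #include <vector>

    // Sketch only: broadcast a preprocessed string from masterNode to every rank.
    // Assumes MPI_Init has already been called; bcastMetaData is a made-up name.
    std::string bcastMetaData(const std::string& text, int masterNode, int worldRank) {
      long streamSize = 0;
      std::vector<char> buf;

      if (worldRank == masterNode) {
        streamSize = static_cast<long>(text.size()) + 1;   // include trailing '\0'
        buf.assign(text.begin(), text.end());
        buf.push_back('\0');
      }

      // Collectives are called by all ranks with the same root argument:
      MPI_Bcast(&streamSize, 1, MPI_LONG, masterNode, MPI_COMM_WORLD);
      if (worldRank != masterNode) buf.resize(streamSize);
      MPI_Bcast(buf.data(), static_cast<int>(streamSize), MPI_CHAR,
                masterNode, MPI_COMM_WORLD);

      return std::string(buf.data());
    }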
| 159 |   |       parser.initializeASTFactory(factory);
| 160 |   |       parser.setASTFactory(&factory);
| 161 |   |       parser.mdfile();
| 152 | – |
| 162 |   |       // Create a tree parser that reads information into Globals
| 163 |   |       MDTreeParser treeParser;
| 164 |   |       treeParser.initializeASTFactory(factory);
| 529 |   |                                                     // error
| 530 |   |                                                     // condition:
| 531 |   |
| 532 | < |     nProcessors = MPI::COMM_WORLD.Get_size();
| 532 | > |     MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
| 533 | > |     //nProcessors = MPI::COMM_WORLD.Get_size();
| 534 |   |
| 535 |   |     if (nProcessors > nGlobalMols) {
| 536 |   |       sprintf(painCave.errMsg,
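This hunk shows the calling-convention difference between the two bindings: the C++ bindings return the value directly, while the C bindings return an error code and pass results back through pointer arguments. A minimal, self-contained sketch of that convention (illustrative only, not taken from the patch):

    #include <mpi.h>
    #include <cstdio>

    int main(int argc, char** argv) {
      MPI_Init(&argc, &argv);

      // C bindings: results come back through out-parameters, and the
      // return value is an MPI error code (MPI_SUCCESS on success).
      int nProcessors = 0;   // was: nProcessors = MPI::COMM_WORLD.Get_size();
      int worldRank   = 0;   // was: worldRank   = MPI::COMM_WORLD.Get_rank();
      MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
      MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

      std::printf("rank %d of %d\n", worldRank, nProcessors);
      MPI_Finalize();
      return 0;
    }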
| 649 |   |       delete myRandom;
| 650 |   |
| 651 |   |       // Spray out this nonsense to all other processors:
| 652 | < |       MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 652 | > |       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 653 | > |       // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 654 |   |     } else {
| 655 |   |
| 656 |   |       // Listen to your marching orders from processor 0:
| 657 | < |       MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 657 | > |       MPI_Bcast(&molToProcMap[0], nGlobalMols, MPI_INT, 0, MPI_COMM_WORLD);
| 658 | > |       // MPI::COMM_WORLD.Bcast(&molToProcMap[0], nGlobalMols, MPI::INT, 0);
| 659 |   |
| 660 |   |     }
| 661 |   |
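Note that both branches end up issuing the identical MPI_Bcast; only the surrounding logic (building versus receiving molToProcMap) differs, because a broadcast is a collective that every rank must call with the same root. A hedged sketch of that shared call, assuming molToProcMap has already been sized to nGlobalMols on every rank (the helper name is hypothetical):

    #include <mpi.h>
    #include <vector>

    // Sketch: every rank, including the root, makes the same collective call.
    void broadcastMolToProcMap(std::vector<int>& molToProcMap) {
      MPI_Bcast(molToProcMap.data(),
                static_cast<int>(molToProcMap.size()),
                MPI_INT, 0, MPI_COMM_WORLD);   // root is processor 0
    }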
| 929 |   |     // This would be prettier if we could use MPI_IN_PLACE like the MPI-2
| 930 |   |     // docs said we could.
| 931 |   |     std::vector<int> tmpGroupMembership(info->getNGlobalAtoms(), 0);
| 932 | < |     MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
| 933 | < |                               &tmpGroupMembership[0], nGlobalAtoms,
| 934 | < |                               MPI::INT, MPI::SUM);
| 932 | > |     MPI_Allreduce(&globalGroupMembership[0],
| 933 | > |                   &tmpGroupMembership[0], nGlobalAtoms,
| 934 | > |                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 935 | > |     // MPI::COMM_WORLD.Allreduce(&globalGroupMembership[0],
| 936 | > |     //                           &tmpGroupMembership[0], nGlobalAtoms,
| 937 | > |     //                           MPI::INT, MPI::SUM);
| 938 |   |     info->setGlobalGroupMembership(tmpGroupMembership);
| 939 |   | #else
| 940 |   |     info->setGlobalGroupMembership(globalGroupMembership);
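The comment in this hunk notes that MPI_IN_PLACE would make the reduction prettier. The patch keeps the temporary receive buffer, but for reference, the in-place form of MPI_Allreduce in the C bindings looks roughly like the sketch below (an illustration, not a change made here): passing MPI_IN_PLACE as the send buffer tells every rank to reduce into its own receive buffer.

    #include <mpi.h>
    #include <vector>

    // Sketch: in-place sum-reduction over all ranks; the vector serves as both
    // input and output, so no tmpGroupMembership copy is needed.
    void allreduceInPlace(std::vector<int>& globalGroupMembership) {
      MPI_Allreduce(MPI_IN_PLACE,
                    globalGroupMembership.data(),
                    static_cast<int>(globalGroupMembership.size()),
                    MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    }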
| 958 |   | #ifdef IS_MPI
| 959 |   |     std::vector<int> tmpMolMembership(info->getNGlobalAtoms() +
| 960 |   |                                       info->getNGlobalRigidBodies(), 0);
| 961 | < |     MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 962 | < |                               nGlobalAtoms + nGlobalRigidBodies,
| 963 | < |                               MPI::INT, MPI::SUM);
| 961 | > |     MPI_Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 962 | > |                   nGlobalAtoms + nGlobalRigidBodies,
| 963 | > |                   MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 964 | > |     // MPI::COMM_WORLD.Allreduce(&globalMolMembership[0], &tmpMolMembership[0],
| 965 | > |     //                           nGlobalAtoms + nGlobalRigidBodies,
| 966 | > |     //                           MPI::INT, MPI::SUM);
| 967 |   |
| 968 |   |     info->setGlobalMolMembership(tmpMolMembership);
| 969 |   | #else
| 981 |   |
| 982 |   | #ifdef IS_MPI
| 983 |   |     std::vector<int> numIntegrableObjectsPerMol(info->getNGlobalMolecules(), 0);
| 984 | < |     MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 985 | < |                               info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
| 984 | > |     MPI_Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 985 | > |       info->getNGlobalMolecules(), MPI_INT, MPI_SUM, MPI_COMM_WORLD);
| 986 | > |     // MPI::COMM_WORLD.Allreduce(&nIOPerMol[0], &numIntegrableObjectsPerMol[0],
| 987 | > |     //                           info->getNGlobalMolecules(), MPI::INT, MPI::SUM);
| 988 |   | #else
| 989 |   |     std::vector<int> numIntegrableObjectsPerMol = nIOPerMol;
| 990 |   | #endif