| 46 | 
  | 
 * [4]  Vardeman & Gezelter, in progress (2009).                         | 
| 47 | 
  | 
 */ | 
| 48 | 
  | 
 | 
| 49 | 
< | 
#ifndef FORCEDECOMPOSITION_COMMUNICATOR_HPP | 
| 50 | 
< | 
#define FORCEDECOMPOSITION_COMMUNICATOR_HPP | 
| 49 | 
> | 
#ifndef PARALLEL_COMMUNICATOR_HPP | 
| 50 | 
> | 
#define PARALLEL_COMMUNICATOR_HPP | 
| 51 | 
  | 
 | 
| 52 | 
  | 
#include <config.h> | 
| 53 | 
  | 
#include <mpi.h> | 
| 57 | 
  | 
   | 
| 58 | 
  | 
#ifdef IS_MPI | 
| 59 | 
  | 
 | 
| 60 | 
  /// Selects which slice of the force-decomposition processor grid a
  /// Communicator<D, T> is built over when it splits MPI::COMM_WORLD
  /// (the constructor switches on this value to choose the Split color).
  enum communicatorType {
    Global = 0,   ///< split keyed on the processor's world rank
    Row = 1,      ///< processors sharing this processor's grid row
    Column = 2    ///< processors sharing this processor's grid column
  };
| 65 | 
  | 
     | 
| 66 | 
  | 
  template<typename T>  | 
| 79 | 
  | 
  template<> const MPI::Datatype MPITraits<Mat3x3d>::datatype = MPI_REALTYPE; | 
| 80 | 
  | 
  template<> const int MPITraits<Mat3x3d>::dim = 9; | 
| 81 | 
  | 
   | 
| 82 | 
< | 
  template<direction D, typename T> | 
| 83 | 
< | 
  class Comm {  | 
| 82 | 
> | 
  template<communicatorType D, typename T> | 
| 83 | 
> | 
  class Communicator {  | 
| 84 | 
  | 
  public:  | 
| 85 | 
  | 
     | 
| 86 | 
< | 
    Comm<D, T>(int nObjects) { | 
| 86 | 
> | 
    Communicator<D, T>(int nObjects) { | 
| 87 | 
  | 
       | 
| 88 | 
  | 
      int nProc = MPI::COMM_WORLD.Get_size(); | 
| 89 | 
  | 
      int myRank = MPI::COMM_WORLD.Get_rank(); | 
| 99 | 
  | 
      rowIndex_ = myRank / nColumns;       | 
| 100 | 
  | 
      columnIndex_ = myRank % nColumns; | 
| 101 | 
  | 
 | 
| 102 | 
< | 
      if (D == I) { | 
| 102 | 
> | 
      switch(D) { | 
| 103 | 
> | 
      case Row : | 
| 104 | 
  | 
        myComm = MPI::COMM_WORLD.Split(rowIndex_, 0); | 
| 105 | 
< | 
      } else { | 
| 105 | 
> | 
        break; | 
| 106 | 
> | 
      case Column: | 
| 107 | 
  | 
        myComm = MPI::COMM_WORLD.Split(columnIndex_, 0); | 
| 108 | 
+ | 
        break; | 
| 109 | 
+ | 
      case Global: | 
| 110 | 
+ | 
        myComm = MPI::COMM_WORLD.Split(myRank, 0); | 
| 111 | 
  | 
      } | 
| 112 | 
  | 
          | 
| 113 | 
  | 
      int nCommProcs = myComm.Get_size(); | 
| 122 | 
  | 
      displacements[0] = 0; | 
| 123 | 
  | 
      for (int i = 1; i < nCommProcs; i++) { | 
| 124 | 
  | 
        displacements[i] = displacements[i-1] + counts[i-1]; | 
| 125 | 
< | 
      }       | 
| 125 | 
> | 
        size_ += counts[i-1]; | 
| 126 | 
> | 
      } | 
| 127 | 
> | 
 | 
| 128 | 
> | 
      size_ = 0; | 
| 129 | 
> | 
      for (int i = 0; i < nCommProcs; i++) { | 
| 130 | 
> | 
        size_ += counts[i]; | 
| 131 | 
> | 
      } | 
| 132 | 
  | 
    } | 
| 133 | 
  | 
 | 
| 134 | 
  | 
 | 
| 150 | 
  | 
      myComm.Reduce_scatter(&v1[0], &v2[0], &counts[0],  | 
| 151 | 
  | 
                            MPITraits<T>::datatype, MPI::SUM); | 
| 152 | 
  | 
    } | 
| 153 | 
    /// Total number of objects across every processor in this
    /// communicator: the sum of the per-processor counts gathered in
    /// the constructor.
    int getSize() {
      return size_;
    }
| 157 | 
  | 
     | 
| 158 | 
  | 
  private: | 
| 159 | 
  | 
    int planSize_;     ///< how many are on local proc | 
| 160 | 
  | 
    int rowIndex_; | 
| 161 | 
  | 
    int columnIndex_; | 
| 162 | 
+ | 
    int size_; | 
| 163 | 
  | 
    std::vector<int> counts; | 
| 164 | 
  | 
    std::vector<int> displacements; | 
| 165 | 
  | 
    MPI::Intracomm myComm; |