| 46 |   |  * [4] Vardeman & Gezelter, in progress (2009). |
| 47 |   |  */ |
| 48 |   | |
| 49 | < | #ifndef FORCEDECOMPOSITION_COMMUNICATOR_HPP |
| 50 | < | #define FORCEDECOMPOSITION_COMMUNICATOR_HPP |
| 49 | > | #ifndef PARALLEL_COMMUNICATOR_HPP |
| 50 | > | #define PARALLEL_COMMUNICATOR_HPP |
| 51 |   | |
| 52 |   | #include <config.h> |
| 53 |   | #include <mpi.h> |
| 57 |   | |
| 58 |   | #ifdef IS_MPI |
| 59 |   | |
| 60 | < | enum direction { |
| 61 | < |   Row = 0, |
| 62 | < |   Column = 1 |
| 60 | > | enum communicatorType { |
| 61 | > |   Global = 0, |
| 62 | > |   Row = 1, |
| 63 | > |   Column = 2 |
| 64 |   | }; |
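The old `direction` enum only distinguished `Row` and `Column`; the new `communicatorType` adds a `Global` option and renumbers the values. On an `nRows x nColumns` process grid, ranks sharing a row index form one `Row` communicator and ranks sharing a column index form one `Column` communicator. A minimal sketch of that mapping, using an illustrative 3x4 grid (the grid size and the standalone `main` are assumptions, not from the diff):

```cpp
#include <cstdio>

enum communicatorType { Global = 0, Row = 1, Column = 2 };

int main() {
  const int nColumns = 4;  // illustrative 3x4 process grid (12 ranks)
  for (int myRank = 0; myRank < 12; ++myRank) {
    int rowIndex    = myRank / nColumns;  // ranks 0-3 share row 0, 4-7 share row 1, ...
    int columnIndex = myRank % nColumns;  // ranks 0, 4, 8 share column 0, ...
    std::printf("rank %2d -> row %d, column %d\n", myRank, rowIndex, columnIndex);
  }
  return 0;
}
```

This is the same `myRank / nColumns` and `myRank % nColumns` arithmetic the constructor uses below to compute `rowIndex_` and `columnIndex_`.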
| 65 |   | |
| 66 |   | template<typename T> |
| 79 |   | template<> const MPI::Datatype MPITraits<Mat3x3d>::datatype = MPI_REALTYPE; |
| 80 |   | template<> const int MPITraits<Mat3x3d>::dim = 9; |
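The primary template (elided at lines 67-78 of the file) presumably declares per-type MPI metadata: the datatype of a type's scalar elements and how many of them one object holds, so that a `Mat3x3d` (a 3x3 real matrix) travels as nine `MPI_REALTYPE` values. A hedged sketch of that traits pattern, written against the MPI C API rather than the MPI-2 C++ bindings the file uses, and assuming `RealType` is `double` (both are assumptions):

```cpp
#include <mpi.h>

typedef double RealType;          // assumption: OpenMD's RealType is double here
#define MPI_REALTYPE MPI_DOUBLE   // assumption: matches RealType above

// Each transferable type reports its element datatype and element count.
template <typename T>
struct MPITraits {
  static const MPI_Datatype datatype;
  static const int dim;
};

// A bare real is one element; the Mat3x3d specializations shown above
// follow the same pattern with dim = 9.
template <> const MPI_Datatype MPITraits<RealType>::datatype = MPI_REALTYPE;
template <> const int          MPITraits<RealType>::dim      = 1;
```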
| 81 |   | |
| 82 | < | template<direction D, typename T> |
| 82 | > | template<communicatorType D, typename T> |
| 83 |   | class Communicator { |
| 84 |   | public: |
| 85 |   | |
| 99 |   | rowIndex_ = myRank / nColumns; |
| 100 |   | columnIndex_ = myRank % nColumns; |
| 101 |   | |
| 102 | < | if (D == Row) { |
| 102 | > | switch(D) { |
| 103 | > | case Row : |
| 104 |   |   myComm = MPI::COMM_WORLD.Split(rowIndex_, 0); |
| 105 | < | } else { |
| 105 | > |   break; |
| 106 | > | case Column: |
| 107 |   |   myComm = MPI::COMM_WORLD.Split(columnIndex_, 0); |
| 108 | + |   break; |
| 109 | + | case Global: |
| 110 | + |   myComm = MPI::COMM_WORLD.Split(myRank, 0); |
| 111 |   | } |
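Replacing the two-way `if`/`else` with a `switch` makes room for the new `Global` case. `Split(color, key)` places all ranks that pass the same color into one communicator, so splitting on `rowIndex_` builds row communicators, splitting on `columnIndex_` builds column communicators, and splitting on `myRank` (a color unique to each rank) yields single-member communicators. A runnable sketch of the same splits using the MPI C API, since the MPI-2 C++ bindings used in the file were removed in MPI-3 (the 2-column grid is illustrative):

```cpp
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int myRank, nProcs;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &nProcs);

  const int nColumns = 2;  // illustrative; the library derives this from nProcs
  int rowIndex    = myRank / nColumns;
  int columnIndex = myRank % nColumns;

  // Ranks with equal color share a communicator; key 0 keeps them ordered
  // by their MPI_COMM_WORLD rank, matching Split(color, 0) in the diff.
  MPI_Comm rowComm, columnComm;
  MPI_Comm_split(MPI_COMM_WORLD, rowIndex, 0, &rowComm);
  MPI_Comm_split(MPI_COMM_WORLD, columnIndex, 0, &columnComm);

  int rowSize;
  MPI_Comm_size(rowComm, &rowSize);
  std::printf("rank %d: row communicator holds %d ranks\n", myRank, rowSize);

  MPI_Comm_free(&rowComm);
  MPI_Comm_free(&columnComm);
  MPI_Finalize();
  return 0;
}
```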
| 112 |   | |
| 113 |   | int nCommProcs = myComm.Get_size(); |
| 119 |   | |
| 120 |   | myComm.Allgather(&planSize_, 1, MPI::INT, &counts[0], 1, MPI::INT); |
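Each rank's `planSize_` is the number of scalar elements it will contribute (`dim * nObjects` under the traits above), and the `Allgather` leaves every rank holding the full `counts` vector, one entry per rank in the plan's communicator. A sketch with the C API, with illustrative per-rank sizes:

```cpp
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int myRank, nCommProcs;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
  MPI_Comm_size(MPI_COMM_WORLD, &nCommProcs);

  // Illustrative plan size: dim = 9 (a Mat3x3d) times a per-rank object count.
  int planSize = 9 * (myRank + 1);
  std::vector<int> counts(nCommProcs);

  // After this call every rank holds everyone's plan size in counts[].
  MPI_Allgather(&planSize, 1, MPI_INT, counts.data(), 1, MPI_INT, MPI_COMM_WORLD);

  if (myRank == 0)
    for (int i = 0; i < nCommProcs; ++i)
      std::printf("rank %d contributes %d elements\n", i, counts[i]);

  MPI_Finalize();
  return 0;
}
```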
| 121 |   | |
| 116 | - | |
| 122 |   | displacements[0] = 0; |
| 123 |   | for (int i = 1; i < nCommProcs; i++) { |
| 124 |   |   displacements[i] = displacements[i-1] + counts[i-1]; |
| 125 | < |   size_ += count[i-1]; |
| 125 | > |   size_ += counts[i-1]; |
| 126 |   | } |
| 127 |   | |
| 128 |   | size_ = 0; |
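With `counts` in hand, the loop computes the exclusive prefix sum used as the displacement array of an `Allgatherv`-style call: rank i's block starts at `displacements[i] = counts[0] + ... + counts[i-1]` (the replaced line also fixes the typo `count` to `counts`). A worked sketch with illustrative counts; the total gathered length is the sum of all entries:

```cpp
#include <cstdio>
#include <vector>

int main() {
  std::vector<int> counts = {9, 18, 27, 9};  // illustrative elements per rank
  std::vector<int> displacements(counts.size());

  int size = 0;
  displacements[0] = 0;
  for (std::size_t i = 1; i < counts.size(); ++i) {
    displacements[i] = displacements[i - 1] + counts[i - 1];  // exclusive prefix sum
    size += counts[i - 1];
  }
  size += counts.back();  // include the last rank's block in the total

  for (std::size_t i = 0; i < counts.size(); ++i)
    std::printf("rank %zu: count %2d, displacement %2d\n",
                i, counts[i], displacements[i]);
  std::printf("total gathered length = %d\n", size);  // 63 for these counts
  return 0;
}
```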