Diff hunk at line 1 (old) / line 1 (new):

   1  <  /*
   1  >  /*
   2     * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
   3     *
   4     * The University of Notre Dame grants you ("Licensee") a
Diff hunk at line 42 (old) / line 42 (new):

  42     #include "math/ParallelRandNumGen.hpp"
  43     #ifdef IS_MPI
  44     #include <mpi.h>
  45  +  #endif
  46
  47     namespace oopse {
  48
  49  +    int ParallelRandNumGen::nCreatedRNG_ = 0;
  50
  51  +    ParallelRandNumGen::ParallelRandNumGen(const uint32& oneSeed) {
  52
  50  -    int ParallelRandNumGen::nCreatedRNG_ = 0;
  51  -
  52  -    ParallelRandNumGen::ParallelRandNumGen( const uint32& oneSeed) {
  53  -
  53         const int masterNode = 0;
  54         int seed = oneSeed;
  55  +  #ifdef IS_MPI
  56         MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
  57  <
  57  >  #endif
  58         if (seed != oneSeed) {
  59  <        sprintf(painCave.errMsg,
  60  <                "Using different seed to initialize ParallelRandNumGen.\n");
  61  <        painCave.isFatal = 1;;
  62  <        simError();
  59  >        sprintf(painCave.errMsg,
  60  >                "Using different seed to initialize ParallelRandNumGen.\n");
  61  >        painCave.isFatal = 1;;
  62  >        simError();
  63         }
  64
  65         int nProcessors;
  66  +  #ifdef IS_MPI
  67         MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
  68  <
  69  <      //In order to generate independent random number stream, the actual seed used by random
  70  <      //number generator is the seed passed to the constructor plus the number of random number
  71  <      //generators which are already created.
  68  >      MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
  69  >  #else
  70  >      nProcessors = 1;
  71  >      myRank_ = 0;
  72  >  #endif
  73  >      //In order to generate independent random number stream, the
  74  >      //actual seed used by random number generator is the seed passed
  75  >      //to the constructor plus the number of random number generators
  76  >      //which are already created.
  77         int newSeed = oneSeed + nCreatedRNG_;
  78  <      mtRand_ = new MTRand(newSeed, nProcessors, worldRank);
  79  <
  78  >      mtRand_ = new MTRand(newSeed, nProcessors, myRank_);
  79  >
  80         ++nCreatedRNG_;
  81  <    }
  81  >    }
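The comment added at new lines 73-76 states the seeding policy: each generator instance is seeded with the caller's seed plus the count of generators already created, and the three-argument MTRand(newSeed, nProcessors, myRank_) constructor is presumably what splits that single seeded sequence into one sub-stream per MPI rank. As a rough illustration of one common way to do such a split (a leapfrog partition, in which rank r of P ranks keeps every P-th draw), here is a self-contained sketch; LeapfrogStream is a hypothetical stand-in, not OOPSE's MTRand, and whether MTRand partitions its stream exactly this way is an assumption.

```cpp
#include <cstdint>
#include <random>

// Hypothetical illustration only: rank `myStream` of `nStreams` ranks keeps
// every nStreams-th variate of a sequence seeded identically everywhere,
// so the ranks never reuse each other's numbers.
class LeapfrogStream {
public:
  LeapfrogStream(std::uint32_t seed, int nStreams, int myStream)
      : engine_(seed), nStreams_(nStreams) {
    engine_.discard(myStream);        // skip ahead to this rank's first draw
  }

  std::uint32_t next() {
    std::uint32_t value = static_cast<std::uint32_t>(engine_());
    engine_.discard(nStreams_ - 1);   // skip the draws owned by other ranks
    return value;
  }

private:
  std::mt19937 engine_;
  int nStreams_;
};

// Two "ranks" built from the same seed draw disjoint, interleaved values:
//   LeapfrogStream a(42u, 2, 0), b(42u, 2, 1);  a.next();  b.next();
```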
  82
  83  <    ParallelRandNumGen::ParallelRandNumGen() {
  83  >    ParallelRandNumGen::ParallelRandNumGen() {
  84
  85         std::vector<uint32> bigSeed;
  86         const int masterNode = 0;
  87         int nProcessors;
  88  +  #ifdef IS_MPI
  89         MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
  90  <      mtRand_ = new MTRand(nProcessors, worldRank);
  90  >      MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
  91  >  #else
  92  >      nProcessors = 1;
  93  >      myRank_ = 0;
  94  >  #endif
  95  >      mtRand_ = new MTRand(nProcessors, myRank_);
  96
  97  <      seed(); /** @todo calling virtual function in constructor is not a good design */
  98  <    }
  97  >      seed(); /** @todo calling virtual function in constructor is
  98  >                  not a good design */
  99  >    }
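The @todo kept at new lines 97-98 flags a real C++ pitfall: a virtual call made from a constructor dispatches to the version defined in the class whose constructor is currently running, so if a subclass of ParallelRandNumGen ever overrode seed(), that override would not be reached from this constructor. A minimal, self-contained sketch of the behavior (the Base and Derived classes are hypothetical, not OOPSE types):

```cpp
#include <iostream>

struct Base {
  Base() {
    seed();   // dispatches to Base::seed(), even when a Derived is being built
  }
  virtual ~Base() {}
  virtual void seed() { std::cout << "Base::seed\n"; }
};

struct Derived : public Base {
  virtual void seed() { std::cout << "Derived::seed\n"; }
};

int main() {
  Derived d;   // prints "Base::seed", not "Derived::seed"
  return 0;
}
```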
 100
 101
 102  <    void ParallelRandNumGen::seed( const uint32 oneSeed ) {
 102  >    void ParallelRandNumGen::seed( const uint32 oneSeed ) {
 103
 104        const int masterNode = 0;
 105        int seed = oneSeed;
 106  +  #ifdef IS_MPI
 107        MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 108  <
 108  >  #endif
 109        if (seed != oneSeed) {
 110  <       sprintf(painCave.errMsg,
 111  <               "Using different seed to initialize ParallelRandNumGen.\n");
 112  <       painCave.isFatal = 1;;
 113  <       simError();
 110  >       sprintf(painCave.errMsg,
 111  >               "Using different seed to initialize ParallelRandNumGen.\n");
 112  >       painCave.isFatal = 1;;
 113  >       simError();
 114        }
 115  <
 115  >
 116        int newSeed = oneSeed +nCreatedRNG_;
 117        mtRand_->seed(newSeed);
 118  <
 118  >
 119        ++nCreatedRNG_;
 120  <    }
 120  >    }
 121
 122  <    void ParallelRandNumGen::seed() {
 122  >    void ParallelRandNumGen::seed() {
 123
 124        std::vector<uint32> bigSeed;
 125        int size;
 126        const int masterNode = 0;
 127  +  #ifdef IS_MPI
 128        if (worldRank == masterNode) {
 129  <       bigSeed = mtRand_->generateSeeds();
 115  <       size = bigSeed.size();
 116  <       MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 117  <       MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 129  >  #endif
 130
 131  +      bigSeed = mtRand_->generateSeeds();
 132  +      size = bigSeed.size();
 133  +
 134  +  #ifdef IS_MPI
 135  +      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 136  +      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 137        }else {
 138  <       MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 139  <       bigSeed.resize(size);
 140  <       MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 138  >       MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
 139  >       bigSeed.resize(size);
 140  >       MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
 141        }
 142  +  #endif
 143
 144        if (bigSeed.size() == 1) {
 145  <       mtRand_->seed(bigSeed[0]);
 145  >       mtRand_->seed(bigSeed[0]);
 146        } else {
 147  <       mtRand_->seed(&bigSeed[0], bigSeed.size());
 147  >       mtRand_->seed(&bigSeed[0], bigSeed.size());
 148        }
 149
 150        ++nCreatedRNG_;
 151  <    }
 133  <
 134  <
 151  >    }
 152     }
 136  -
 137  -  #endif
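The argument-free seed() above follows a standard two-step broadcast pattern for replicating a buffer whose length only the master rank knows: broadcast the element count first so every other rank can size its vector, then broadcast the contents. Below is a minimal sketch of that pattern, assuming MPI_Init has already been called; broadcastSeeds is a hypothetical helper written for illustration, not an OOPSE function.

```cpp
#include <vector>
#include <mpi.h>

// Replicate `seeds` from masterNode onto every rank in MPI_COMM_WORLD.
void broadcastSeeds(std::vector<unsigned long>& seeds, int masterNode) {
  int myRank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

  // Step 1: everyone learns how many elements are coming.
  int size = static_cast<int>(seeds.size());
  MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);

  // Step 2: non-master ranks make room, then the data itself is broadcast.
  if (myRank != masterNode)
    seeds.resize(size);
  MPI_Bcast(&seeds[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
}
```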
Legend:
  -  removed lines
  +  added lines
  <  changed lines (old revision)
  >  changed lines (new revision)