/*
 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
 *
 * The University of Notre Dame grants you ("Licensee") a
 * non-exclusive, royalty free, license to use, modify and
 * redistribute this software in source and binary code form, provided
 * that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * This software is provided "AS IS," without a warranty of any
 * kind. All express or implied conditions, representations and
 * warranties, including any implied warranty of merchantability,
 * fitness for a particular purpose or non-infringement, are hereby
 * excluded.  The University of Notre Dame and its licensors shall not
 * be liable for any damages suffered by licensee as a result of
 * using, modifying or distributing the software or its
 * derivatives. In no event will the University of Notre Dame or its
 * licensors be liable for any lost revenue, profit or data, or for
 * direct, indirect, special, consequential, incidental or punitive
 * damages, however caused and regardless of the theory of liability,
 * arising out of the use of or inability to use software, even if the
 * University of Notre Dame has been advised of the possibility of
 * such damages.
 *
 * SUPPORT OPEN SCIENCE!  If you use OpenMD or its source code in your
 * research, please cite the appropriate papers when you publish your
 * work.  Good starting points are:
 *
 * [1]  Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
 * [2]  Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
 * [3]  Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
 * [4]  Vardeman & Gezelter, in progress (2009).
 */

#include "math/ParallelRandNumGen.hpp"
#ifdef IS_MPI
#include <mpi.h>
#endif

namespace OpenMD {

  int ParallelRandNumGen::nCreatedRNG_ = 0;

  ParallelRandNumGen::ParallelRandNumGen(const uint32& oneSeed) {

    const int masterNode = 0;
    unsigned long seed = oneSeed;

#ifdef IS_MPI
    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
#endif

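    // If any rank was handed a seed different from the master's, the
    // broadcast overwrites it and the comparison below catches the
    // mismatch, so inconsistent per-rank seeds fail loudly instead of
    // silently desynchronizing the parallel streams.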
    if (seed != oneSeed) {
      sprintf(painCave.errMsg,
              "Using different seeds to initialize ParallelRandNumGen.\n");
      painCave.isFatal = 1;
      simError();
    }

    int nProcessors;
#ifdef IS_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
#else
    nProcessors = 1;
    myRank_ = 0;
#endif
    // To generate independent random number streams, the actual seed
    // used by the generator is the seed passed to the constructor plus
    // the number of generators that have already been created.
    unsigned long newSeed = oneSeed + nCreatedRNG_;
    mtRand_ = new MTRand(newSeed, nProcessors, myRank_);

    ++nCreatedRNG_;
  }

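  // Illustrative sketch (not part of the original source): with the
  // seed-offset scheme above, two generators built from the same base
  // seed on a given rank still receive distinct seeds:
  //
  //   ParallelRandNumGen a(42);   // newSeed = 42 + 0, stream id = myRank_
  //   ParallelRandNumGen b(42);   // newSeed = 42 + 1, stream id = myRank_
  //
  // Combined with the per-rank stream id passed to MTRand, this is
  // intended to keep all nProcessors * nCreatedRNG_ streams distinct.
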
  ParallelRandNumGen::ParallelRandNumGen() {

    int nProcessors;
#ifdef IS_MPI
    MPI_Comm_size(MPI_COMM_WORLD, &nProcessors);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank_);
#else
    nProcessors = 1;
    myRank_ = 0;
#endif
    mtRand_ = new MTRand(nProcessors, myRank_);

    seed();  /** @todo calling a virtual function in a constructor is
                  not a good design */
  }

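  // A sketch of the usual fix for the @todo above (an assumption, not
  // the original design): make seeding an explicit second phase so that
  // no virtual call happens during construction:
  //
  //   ParallelRandNumGen rng;   // hypothetical trivial constructor
  //   rng.seed();               // dispatches normally once constructed
  //
  // Inside a constructor, a virtual call binds statically to this
  // class's seed(), so an override in a derived class would be ignored.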

  void ParallelRandNumGen::seed(const uint32 oneSeed) {

    const int masterNode = 0;
    unsigned long seed = oneSeed;
#ifdef IS_MPI
    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
#endif
    if (seed != oneSeed) {
      sprintf(painCave.errMsg,
              "Using different seeds to initialize ParallelRandNumGen.\n");
      painCave.isFatal = 1;
      simError();
    }

    unsigned long newSeed = oneSeed + nCreatedRNG_;
    mtRand_->seed(newSeed);

    ++nCreatedRNG_;
  }

  void ParallelRandNumGen::seed() {

    std::vector<uint32> bigSeed;
    int size;
    const int masterNode = 0;
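    // The master rank generates the seed vector and broadcasts it in two
    // steps: first the length, so the other ranks can size their buffers,
    // then the seed values themselves, leaving every rank with an
    // identical bigSeed to hand to the generator.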
#ifdef IS_MPI
    if (worldRank == masterNode) {
#endif

      bigSeed = mtRand_->generateSeeds();
      size = bigSeed.size();

#ifdef IS_MPI
      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
    } else {
      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
      bigSeed.resize(size);
      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
    }
#endif

    if (bigSeed.size() == 1) {
      mtRand_->seed(bigSeed[0]);
    } else {
      mtRand_->seed(&bigSeed[0], bigSeed.size());
    }

    ++nCreatedRNG_;
  }
}
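
// Usage sketch (illustrative only; it assumes the wrapper forwards
// MTRand's drawing methods, e.g. rand(), as declared in
// math/ParallelRandNumGen.hpp):
//
//   OpenMD::ParallelRandNumGen rng(42);   // same base seed on every rank
//   double x = rng.rand();                // rank-local, independent draw
//
// Construction is collective: the seeded constructor calls MPI_Bcast,
// so every rank in MPI_COMM_WORLD must create the generator together.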