--- trunk/src/math/ParallelRandNumGen.cpp	2008/10/22 20:01:49	1313
+++ branches/development/src/math/ParallelRandNumGen.cpp	2012/09/13 14:10:11	1798
@@ -6,19 +6,10 @@
  * redistribute this software in source and binary code form, provided
  * that the following conditions are met:
  *
- * 1. Acknowledgement of the program authors must be made in any
- *    publication of scientific results based in part on use of the
- *    program.  An acceptable form of acknowledgement is citation of
- *    the article in which the program was described (Matthew
- *    A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
- *    J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
- *    Parallel Simulation Engine for Molecular Dynamics,"
- *    J. Comput. Chem. 26, pp. 252-271 (2005))
- *
- * 2. Redistributions of source code must retain the above copyright
+ * 1. Redistributions of source code must retain the above copyright
  *    notice, this list of conditions and the following disclaimer.
  *
- * 3. Redistributions in binary form must reproduce the above copyright
+ * 2. Redistributions in binary form must reproduce the above copyright
  *    notice, this list of conditions and the following disclaimer in the
  *    documentation and/or other materials provided with the
  *    distribution.
@@ -37,6 +28,16 @@
  * arising out of the use of or inability to use software, even if the
  * University of Notre Dame has been advised of the possibility of
  * such damages.
+ *
+ * SUPPORT OPEN SCIENCE!  If you use OpenMD or its source code in your
+ * research, please cite the appropriate papers when you publish your
+ * work.  Good starting points are:
+ *
+ * [1] Meineke, et al., J. Comp. Chem. 26, 252-271 (2005).
+ * [2] Fennell & Gezelter, J. Chem. Phys. 124, 234104 (2006).
+ * [3] Sun, Lin & Gezelter, J. Chem. Phys. 128, 24107 (2008).
+ * [4] Kuang & Gezelter, J. Chem. Phys. 133, 164101 (2010).
+ * [5] Vardeman, Stocker & Gezelter, J. Chem. Theory Comput. 7, 834 (2011).
  */
 
 #include "math/ParallelRandNumGen.hpp"
@@ -44,17 +45,17 @@
 #include <mpi.h>
 #endif
 
-namespace oopse {
+namespace OpenMD {
 
   int ParallelRandNumGen::nCreatedRNG_ = 0;
 
   ParallelRandNumGen::ParallelRandNumGen(const uint32& oneSeed) {
-    const int masterNode = 0;
     unsigned long seed = oneSeed;
 
 #ifdef IS_MPI
-    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
+    const int masterNode = 0;
+    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
 #endif
 
     if (seed != oneSeed) {
@@ -66,8 +67,8 @@ namespace oopse {
 
     int nProcessors;
 #ifdef IS_MPI
-    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
-    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
+    nProcessors = MPI::COMM_WORLD.Get_size();
+    myRank_ = MPI::COMM_WORLD.Get_rank();
 #else
     nProcessors = 1;
     myRank_ = 0;
@@ -85,11 +86,10 @@ namespace oopse {
 
   ParallelRandNumGen::ParallelRandNumGen() {
 
     std::vector<uint32> bigSeed;
-    const int masterNode = 0;
     int nProcessors;
 #ifdef IS_MPI
-    MPI_Comm_size( MPI_COMM_WORLD, &nProcessors);
-    MPI_Comm_rank( MPI_COMM_WORLD, &myRank_);
+    nProcessors = MPI::COMM_WORLD.Get_size();
+    myRank_ = MPI::COMM_WORLD.Get_rank();
 #else
     nProcessors = 1;
     myRank_ = 0;
@@ -103,10 +103,10 @@ namespace oopse {
 
   void ParallelRandNumGen::seed( const uint32 oneSeed ) {
 
-    const int masterNode = 0;
     unsigned long seed = oneSeed;
 #ifdef IS_MPI
-    MPI_Bcast(&seed, 1, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
+    const int masterNode = 0;
+    MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);
 #endif
 
     if (seed != oneSeed) {
       sprintf(painCave.errMsg,
@@ -124,22 +124,23 @@ namespace oopse {
   void ParallelRandNumGen::seed() {
 
     std::vector<uint32> bigSeed;
+
+#ifdef IS_MPI
     int size;
     const int masterNode = 0;
-#ifdef IS_MPI
     if (worldRank == masterNode) {
 #endif
 
       bigSeed = mtRand_->generateSeeds();
-      size = bigSeed.size();
 
 #ifdef IS_MPI
-      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
-      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
+      size = bigSeed.size();
+      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
+      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
     }else {
-      MPI_Bcast(&size, 1, MPI_INT, masterNode, MPI_COMM_WORLD);
+      MPI::COMM_WORLD.Bcast(&size, 1, MPI::INT, masterNode);
       bigSeed.resize(size);
-      MPI_Bcast(&bigSeed[0], size, MPI_UNSIGNED_LONG, masterNode, MPI_COMM_WORLD);
+      MPI::COMM_WORLD.Bcast(&bigSeed[0], size, MPI::UNSIGNED_LONG, masterNode);
     }
 #endif
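
The pattern this revision touches in every method is the same: the master node generates (or receives) a seed and broadcasts it, so that all ranks start from a common value before deriving per-rank streams. The standalone sketch below illustrates that broadcast idiom with the MPI-2 C++ bindings the revision adopts. It is illustrative only, not part of the commit: the file name, the literal seed, and the per-rank derivation comment are invented for this example.

// seed_bcast_sketch.cpp -- illustrative sketch only, not part of the commit.
// Demonstrates the master-node seed broadcast used above, via the MPI-2
// C++ bindings.  Build with an MPI stack that still ships the C++ API:
//   mpicxx seed_bcast_sketch.cpp -o seed_bcast_sketch
#include <mpi.h>
#include <iostream>

int main(int argc, char* argv[]) {
  MPI::Init(argc, argv);

  const int masterNode = 0;                      // same root rank as the diff
  const int myRank = MPI::COMM_WORLD.Get_rank();

  // Only the master picks a seed; the literal value is arbitrary.
  unsigned long seed = (myRank == masterNode) ? 8675309ul : 0ul;

  // After the broadcast every rank holds the master's value, mirroring the
  // ParallelRandNumGen constructor; a rank whose own argument differed would
  // notice via the (seed != oneSeed) check shown in the diff.
  MPI::COMM_WORLD.Bcast(&seed, 1, MPI::UNSIGNED_LONG, masterNode);

  // A rank could then derive its own stream, e.g. from seed + myRank.
  std::cout << "rank " << myRank << " uses seed " << seed << std::endl;

  MPI::Finalize();
  return 0;
}

One caveat on this design choice: the MPI C++ bindings (MPI::COMM_WORLD, MPI::UNSIGNED_LONG, and friends) were deprecated in MPI-2.2 and removed in MPI-3.0, so the C calls on the left-hand side of this diff are the ones that survive on current MPI implementations.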