root/group/trunk/OOPSE-2.0/src/brains/BlockSnapshotManager.cpp
Revision: 2039
Committed: Wed Feb 16 20:43:10 2005 UTC by tim
File size: 7831 byte(s)
Log Message:
using PhysMem - RSSMem to estimate available memory
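
The estimate simply subtracts the process's current resident set size from the total physical memory, using the helpers declared in utils/physmem.h and utils/residentMem.h. A minimal sketch of the idea, assuming (as this file does) that both helpers report sizes in bytes as doubles:

    #include "utils/physmem.h"
    #include "utils/residentMem.h"

    // Approximate the memory still available to this process as total
    // physical memory minus the current resident set size (RSS).
    double availablePhysMem = physmem_total() - residentMem();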

File Contents

/*
 * Copyright (c) 2005 The University of Notre Dame. All Rights Reserved.
 *
 * The University of Notre Dame grants you ("Licensee") a
 * non-exclusive, royalty free, license to use, modify and
 * redistribute this software in source and binary code form, provided
 * that the following conditions are met:
 *
 * 1. Acknowledgement of the program authors must be made in any
 *    publication of scientific results based in part on use of the
 *    program. An acceptable form of acknowledgement is citation of
 *    the article in which the program was described (Matthew
 *    A. Meineke, Charles F. Vardeman II, Teng Lin, Christopher
 *    J. Fennell and J. Daniel Gezelter, "OOPSE: An Object-Oriented
 *    Parallel Simulation Engine for Molecular Dynamics,"
 *    J. Comput. Chem. 26, pp. 252-271 (2005))
 *
 * 2. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 3. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * This software is provided "AS IS," without a warranty of any
 * kind. All express or implied conditions, representations and
 * warranties, including any implied warranty of merchantability,
 * fitness for a particular purpose or non-infringement, are hereby
 * excluded. The University of Notre Dame and its licensors shall not
 * be liable for any damages suffered by licensee as a result of
 * using, modifying or distributing the software or its
 * derivatives. In no event will the University of Notre Dame or its
 * licensors be liable for any lost revenue, profit or data, or for
 * direct, indirect, special, consequential, incidental or punitive
 * damages, however caused and regardless of the theory of liability,
 * arising out of the use of or inability to use software, even if the
 * University of Notre Dame has been advised of the possibility of
 * such damages.
 */
#include <algorithm>
#include <iostream>
#include "brains/BlockSnapshotManager.hpp"
#include "utils/residentMem.h"
#include "utils/physmem.h"
#include "utils/Algorithm.hpp"
#include "brains/SimInfo.hpp"
#include "io/DumpReader.hpp"

namespace oopse {
BlockSnapshotManager::BlockSnapshotManager(SimInfo* info, const std::string& filename,
    int storageLayout, int blockCapacity)
    : SnapshotManager(storageLayout), info_(info), blockCapacity_(blockCapacity),
      activeBlocks_(blockCapacity_, -1), activeRefCount_(blockCapacity_, 0) {

    nAtoms_ = info->getNGlobalAtoms();
    nRigidBodies_ = info->getNGlobalRigidBodies();

    double physMem = physmem_total();
    double rssMem = residentMem();
    double availablePhysMem = physMem - rssMem;

    std::cout << "physMem = " << physMem << "\trssMem = " << rssMem
              << "\tavailablePhysMem = " << availablePhysMem << std::endl;

    int bytesPerStuntDouble = DataStorage::getBytesPerStuntDouble(storageLayout);

    int bytesPerFrame = (nRigidBodies_ + nAtoms_) * bytesPerStuntDouble;

    // how many frames fit into the estimated available memory, and how
    // many of those frames each block may hold
    int frameCapacity = int(availablePhysMem / bytesPerFrame);

    nSnapshotPerBlock_ = frameCapacity / blockCapacity_;

    reader_ = new DumpReader(info, filename);
    nframes_ = reader_->getNFrames();

    int nblocks = nframes_ / nSnapshotPerBlock_;
    if (nframes_ % nSnapshotPerBlock_ != 0) {
        ++nblocks;
    }

    for (int i = 0; i < nblocks; ++i) {
        blocks_.push_back(SnapshotBlock(i * nSnapshotPerBlock_, (i + 1) * nSnapshotPerBlock_));
    }
    // the last block may contain fewer than nSnapshotPerBlock_ frames,
    // so clamp its upper bound to the total number of frames
    blocks_.back().second = nframes_;

    snapshots_.insert(snapshots_.begin(), nframes_, static_cast<Snapshot*>(NULL));
}

BlockSnapshotManager::~BlockSnapshotManager() {
    currentSnapshot_ = NULL;
    previousSnapshot_ = NULL;

    delete reader_;

    // release any blocks that are still resident so their Snapshot objects are freed
    std::vector<int>::iterator i;
    for (i = activeBlocks_.begin(); i != activeBlocks_.end(); ++i) {
        if (*i != -1) {
            internalUnload(*i);
        }
    }
}

int BlockSnapshotManager::getNActiveBlocks() {
#ifdef __RWSTD
    // some pre-standard STLs (e.g. Rogue Wave) provide a count_if that
    // accumulates the result into its fourth argument instead of returning it
    int count = 0;
    std::count_if(activeBlocks_.begin(), activeBlocks_.end(),
                  std::bind2nd(std::not_equal_to<int>(), -1), count);
    return count;
#else
    return std::count_if(activeBlocks_.begin(), activeBlocks_.end(),
                         std::bind2nd(std::not_equal_to<int>(), -1));
#endif
}

bool BlockSnapshotManager::loadBlock(int block) {
    std::vector<int>::iterator i = findActiveBlock(block);
    bool loadSuccess;
    if (i != activeBlocks_.end()) {
        // the block is already in memory; just increase its reference count
        ++activeRefCount_[i - activeBlocks_.begin()];
        loadSuccess = true;
    } else if (getNActiveBlocks() < blockCapacity_) {
        // fewer active blocks than the capacity allows; just load it
        internalLoad(block);
        loadSuccess = true;
    } else if (hasZeroRefBlock()) {
        // the capacity has been reached; unload a block with zero references
        // before loading the requested one
        int zeroRefBlock = getFirstZeroRefBlock();
        assert(zeroRefBlock != -1);
        internalUnload(zeroRefBlock);
        internalLoad(block);
        loadSuccess = true;
    } else {
        // the capacity has been reached and every resident block is still referenced
        loadSuccess = false;
    }

    return loadSuccess;
}

bool BlockSnapshotManager::unloadBlock(int block) {
    bool unloadSuccess;
    std::vector<int>::iterator i = findActiveBlock(block);

    if (i != activeBlocks_.end()) {
        --activeRefCount_[i - activeBlocks_.begin()];
        if (activeRefCount_[i - activeBlocks_.begin()] < 0) {
            // guard against unloadBlock being called more times than loadBlock
            activeRefCount_[i - activeBlocks_.begin()] = 0;
        }

        unloadSuccess = true;
    } else {
        unloadSuccess = false;
    }

    return unloadSuccess;
}

void BlockSnapshotManager::internalLoad(int block) {

    for (int i = blocks_[block].first; i < blocks_[block].second; ++i) {
        snapshots_[i] = loadFrame(i);
    }

    std::vector<int>::iterator j;
    j = std::find(activeBlocks_.begin(), activeBlocks_.end(), -1);
    assert(j != activeBlocks_.end());
    *j = block;
    ++activeRefCount_[j - activeBlocks_.begin()];
}

void BlockSnapshotManager::internalUnload(int block) {
    for (int i = blocks_[block].first; i < blocks_[block].second; ++i) {
        delete snapshots_[i];
        snapshots_[i] = NULL;
    }
    std::vector<int>::iterator j;
    j = std::find(activeBlocks_.begin(), activeBlocks_.end(), block);
    assert(j != activeBlocks_.end());
    *j = -1;
}

bool BlockSnapshotManager::hasZeroRefBlock() {
    return std::find(activeRefCount_.begin(), activeRefCount_.end(), 0) != activeRefCount_.end();
}

int BlockSnapshotManager::getFirstZeroRefBlock() {
    std::vector<int>::iterator i = std::find(activeRefCount_.begin(), activeRefCount_.end(), 0);
    return i != activeRefCount_.end() ? activeBlocks_[i - activeRefCount_.begin()] : -1;
}

std::vector<int> BlockSnapshotManager::getActiveBlocks() {
    std::vector<int> result;
    oopse::copy_if(activeBlocks_.begin(), activeBlocks_.end(), std::back_inserter(result),
                   std::bind2nd(std::not_equal_to<int>(), -1));
    return result;
}

Snapshot* BlockSnapshotManager::loadFrame(int frame) {
    Snapshot* snapshot = new Snapshot(nAtoms_, nRigidBodies_, getStorageLayout());
    snapshot->setID(frame);

    /** @todo fix me */
    // temporarily make the new snapshot the current one so the reader fills it,
    // then restore the previous current snapshot
    Snapshot* oldSnapshot = currentSnapshot_;
    currentSnapshot_ = snapshot;
    reader_->readFrame(frame);
    currentSnapshot_ = oldSnapshot;
    return snapshot;
}

int BlockSnapshotManager::getNFrames() {
    return reader_->getNFrames();
}

} // namespace oopse