Skip to content

Commit 67d1bd1

Browse files
committed
Rename size to cost
To be fixed up into the first two commits.
1 parent c6cb773 commit 67d1bd1

File tree

4 files changed

+60
-52
lines changed

4 files changed

+60
-52
lines changed

src/concurrent_cache.h

+5-5
Original file line numberDiff line numberDiff line change
@@ -103,10 +103,10 @@ class ConcurrentCache: private lru_cache<Key, std::shared_future<Value>, FutureT
103103
// should not be the least used item.
104104
// If it happens, this should not be a problem if current_size is bigger than `cost` (most of the time)
105105
// For the really rare specific case of current cache size being lower than `cost` (if possible),
106-
// `decreaseSize` will clamp the new size to 0.
106+
// `decreaseCost` will clamp the new cost to 0.
107107
{
108108
std::unique_lock<std::mutex> l(lock_);
109-
Impl::increaseSize(cost);
109+
Impl::increaseCost(cost);
110110
}
111111
} catch (std::exception& e) {
112112
drop(key);
@@ -131,17 +131,17 @@ class ConcurrentCache: private lru_cache<Key, std::shared_future<Value>, FutureT
131131

132132
size_t getMaxSize() const {
133133
std::unique_lock<std::mutex> l(lock_);
134-
return Impl::getMaxSize();
134+
return Impl::getMaxCost();
135135
}
136136

137137
size_t getCurrentSize() const {
138138
std::unique_lock<std::mutex> l(lock_);
139-
return Impl::size();
139+
return Impl::cost();
140140
}
141141

142142
void setMaxSize(size_t newSize) {
143143
std::unique_lock<std::mutex> l(lock_);
144-
return Impl::setMaxSize(newSize);
144+
return Impl::setMaxCost(newSize);
145145
}
146146

147147
private: // data

src/dirent_accessor.h

+3-3
Original file line numberDiff line numberDiff line change
@@ -55,9 +55,9 @@ class LIBZIM_PRIVATE_API DirectDirentAccessor
5555
std::shared_ptr<const Dirent> getDirent(entry_index_t idx) const;
5656
entry_index_t getDirentCount() const { return m_direntCount; }
5757

58-
size_t getMaxCacheSize() const { return m_direntCache.getMaxSize(); }
59-
size_t getCurrentCacheSize() const { return m_direntCache.size(); }
60-
void setMaxCacheSize(size_t nbDirents) const { m_direntCache.setMaxSize(nbDirents); }
58+
size_t getMaxCacheSize() const { return m_direntCache.getMaxCost(); }
59+
size_t getCurrentCacheSize() const { return m_direntCache.cost(); }
60+
void setMaxCacheSize(size_t nbDirents) const { m_direntCache.setMaxCost(nbDirents); }
6161

6262
private: // functions
6363
std::shared_ptr<const Dirent> readDirent(offset_t) const;

src/lrucache.h

+34-26
Original file line numberDiff line numberDiff line change
@@ -71,6 +71,9 @@ struct UnitCostEstimation {
7171
* This lru is parametrized by a CostEstimation type. The type must have a static method `cost`
7272
* taking a reference to a `value_t` and returning its "cost". As already said, this method must
7373
* always return the same cost for the same value.
74+
*
75+
* While cost could be any kind of value, this implementation is intended to be used only with
76+
* `UnitCostEstimation` (classic lru) and `FutureToValueCostEstimation<ClusterMemorySize>`.
7477
*/
7578
template<typename key_t, typename value_t, typename CostEstimation>
7679
class lru_cache {
@@ -107,9 +110,9 @@ class lru_cache {
107110
};
108111

109112
public: // functions
110-
explicit lru_cache(size_t max_size) :
111-
_max_size(max_size),
112-
_current_size(0)
113+
explicit lru_cache(size_t max_cost) :
114+
_max_cost(max_cost),
115+
_current_cost(0)
113116
{}
114117

115118
// If 'key' is present in the cache, returns the associated value,
@@ -130,8 +133,8 @@ class lru_cache {
130133
auto it = _cache_items_map.find(key);
131134
if (it != _cache_items_map.end()) {
132135
_cache_items_list.splice(_cache_items_list.begin(), _cache_items_list, it->second);
133-
decreaseSize(CostEstimation::cost(it->second->second));
134-
increaseSize(CostEstimation::cost(value));
136+
decreaseCost(CostEstimation::cost(it->second->second));
137+
increaseCost(CostEstimation::cost(value));
135138
it->second->second = value;
136139
} else {
137140
putMissing(key, value);
@@ -155,7 +158,7 @@ class lru_cache {
155158
} catch (std::out_of_range& e) {
156159
return false;
157160
}
158-
decreaseSize(CostEstimation::cost(list_it->second));
161+
decreaseCost(CostEstimation::cost(list_it->second));
159162
_cache_items_list.erase(list_it);
160163
_cache_items_map.erase(key);
161164
return true;
@@ -180,56 +183,56 @@ class lru_cache {
180183
return _cache_items_map.find(key) != _cache_items_map.end();
181184
}
182185

183-
size_t size() const {
184-
return _current_size;
186+
size_t cost() const {
187+
return _current_cost;
185188
}
186189

187-
size_t getMaxSize() const {
188-
return _max_size;
190+
size_t getMaxCost() const {
191+
return _max_cost;
189192
}
190193

191-
void setMaxSize(size_t newSize) {
192-
while (newSize < this->size()) {
194+
void setMaxCost(size_t newMaxCost) {
195+
while (newMaxCost < this->cost()) {
193196
dropLast();
194197
}
195-
_max_size = newSize;
198+
_max_cost = newMaxCost;
196199
}
197200

198201
protected:
199202

200-
void increaseSize(size_t extra_size) {
203+
void increaseCost(size_t extra_cost) {
201204
// increaseCost is called after we have added a value to the cache to update
202205
// the cost of the current cache.
203206
// We must ensure that we don't drop the value we just added.
204207
// While it is technically ok to keep no value if max cache size is 0 (or memory size < of the size of one cluster)
205208
// it will cause the value to be recreated all the time.
206209
// Let's be nice with our user and be tolerant to misconfiguration.
207-
if (!extra_size) {
210+
if (!extra_cost) {
208211
// Don't try to remove an item if we have new size == 0.
209212
// This is the case when the concurrent cache adds a future without a value.
210213
// We will handle the actual cost increase when the concurrent cache calls us directly.
211214
return;
212215
}
213-
_current_size += extra_size;
214-
while (_current_size > _max_size && _cache_items_list.size() > 1) {
216+
_current_cost += extra_cost;
217+
while (_current_cost > _max_cost && size() > 1) {
215218
dropLast();
216219
}
217220
}
218221

219-
void decreaseSize(size_t sizeToRemove) {
220-
if (sizeToRemove > _current_size) {
221-
std::cerr << "WARNING: We have detected inconsistant cache management, trying to remove " << sizeToRemove << " from a cache with size " << _current_size << std::endl;
222+
void decreaseCost(size_t costToRemove) {
223+
if (costToRemove > _current_cost) {
224+
std::cerr << "WARNING: We have detected inconsistent cache management, trying to remove " << costToRemove << " from a cache with cost " << _current_cost << std::endl;
222225
std::cerr << "Please open an issue on https://github.com/openzim/libzim/issues with this message and the zim file you use" << std::endl;
223-
_current_size = 0;
226+
_current_cost = 0;
224227
} else {
225-
_current_size -= sizeToRemove;
228+
_current_cost -= costToRemove;
226229
}
227230
}
228231

229232
private: // functions
230233
void dropLast() {
231234
auto list_it = _cache_items_list.back();
232-
decreaseSize(CostEstimation::cost(list_it.second));
235+
decreaseCost(CostEstimation::cost(list_it.second));
233236
_cache_items_map.erase(_cache_items_list.back().first);
234237
_cache_items_list.pop_back();
235238
}
@@ -238,14 +241,19 @@ class lru_cache {
238241
assert(_cache_items_map.find(key) == _cache_items_map.end());
239242
_cache_items_list.push_front(key_value_pair_t(key, value));
240243
_cache_items_map[key] = _cache_items_list.begin();
241-
increaseSize(CostEstimation::cost(value));
244+
increaseCost(CostEstimation::cost(value));
245+
}
246+
247+
size_t size() const {
248+
return _cache_items_map.size();
242249
}
243250

251+
244252
private: // data
245253
std::list<key_value_pair_t> _cache_items_list;
246254
std::map<key_t, list_iterator_t> _cache_items_map;
247-
size_t _max_size;
248-
size_t _current_size;
255+
size_t _max_cost;
256+
size_t _current_cost;
249257
};
250258

251259
} // namespace zim

test/lrucache.cpp

+18-18
Original file line numberDiff line numberDiff line change
@@ -42,7 +42,7 @@ TEST(CacheTest, SimplePut) {
4242
cache_lru.put(7, 777);
4343
EXPECT_TRUE(cache_lru.exists(7));
4444
EXPECT_EQ(777, cache_lru.get(7));
45-
EXPECT_EQ(1u, cache_lru.size());
45+
EXPECT_EQ(1u, cache_lru.cost());
4646
}
4747

4848
TEST(CacheTest, OverwritingPut) {
@@ -51,7 +51,7 @@ TEST(CacheTest, OverwritingPut) {
5151
cache_lru.put(7, 222);
5252
EXPECT_TRUE(cache_lru.exists(7));
5353
EXPECT_EQ(222, cache_lru.get(7));
54-
EXPECT_EQ(1u, cache_lru.size());
54+
EXPECT_EQ(1u, cache_lru.cost());
5555
}
5656

5757
TEST(CacheTest, MissingValue) {
@@ -66,13 +66,13 @@ TEST(CacheTest, DropValue) {
6666
cache_lru.put(7, 777);
6767
cache_lru.put(8, 888);
6868
cache_lru.put(9, 999);
69-
EXPECT_EQ(3u, cache_lru.size());
69+
EXPECT_EQ(3u, cache_lru.cost());
7070
EXPECT_TRUE(cache_lru.exists(7));
7171
EXPECT_EQ(777, cache_lru.get(7));
7272

7373
EXPECT_TRUE(cache_lru.drop(7));
7474

75-
EXPECT_EQ(2u, cache_lru.size());
75+
EXPECT_EQ(2u, cache_lru.cost());
7676
EXPECT_FALSE(cache_lru.exists(7));
7777
EXPECT_THROW(cache_lru.get(7).value(), std::range_error);
7878

@@ -91,25 +91,25 @@ TEST(CacheTest, VariableCost) {
9191
cache_lru.put(1, 11);
9292
cache_lru.put(2, 22);
9393
cache_lru.put(3, 33);
94-
EXPECT_EQ(66u, cache_lru.size());
94+
EXPECT_EQ(66u, cache_lru.cost());
9595

9696
cache_lru.put(4, 44);
97-
EXPECT_EQ(99u, cache_lru.size());
97+
EXPECT_EQ(99u, cache_lru.cost());
9898
EXPECT_FALSE(cache_lru.exists(1));
9999
EXPECT_TRUE(cache_lru.exists(2));
100100
EXPECT_TRUE(cache_lru.exists(3));
101101
EXPECT_TRUE(cache_lru.exists(4));
102102

103103
cache_lru.put(5, 55);
104-
EXPECT_EQ(99u, cache_lru.size());
104+
EXPECT_EQ(99u, cache_lru.cost());
105105
EXPECT_FALSE(cache_lru.exists(1));
106106
EXPECT_FALSE(cache_lru.exists(2));
107107
EXPECT_FALSE(cache_lru.exists(3));
108108
EXPECT_TRUE(cache_lru.exists(4));
109109
EXPECT_TRUE(cache_lru.exists(5));
110110

111111
cache_lru.put(1, 11);
112-
EXPECT_EQ(66u, cache_lru.size());
112+
EXPECT_EQ(66u, cache_lru.cost());
113113
EXPECT_TRUE(cache_lru.exists(1));
114114
EXPECT_FALSE(cache_lru.exists(2));
115115
EXPECT_FALSE(cache_lru.exists(3));
@@ -121,22 +121,22 @@ TEST(CacheTest, TooBigValue) {
121121
zim::lru_cache<size_t, size_t, IdCost> cache_lru(10);
122122

123123
cache_lru.put(1, 11);
124-
EXPECT_EQ(11u, cache_lru.size());
124+
EXPECT_EQ(11u, cache_lru.cost());
125125
EXPECT_TRUE(cache_lru.exists(1));
126126

127127
cache_lru.put(2, 22);
128-
EXPECT_EQ(22u, cache_lru.size());
128+
EXPECT_EQ(22u, cache_lru.cost());
129129
EXPECT_FALSE(cache_lru.exists(1));
130130
EXPECT_TRUE(cache_lru.exists(2));
131131

132132
cache_lru.put(3, 33);
133-
EXPECT_EQ(33u, cache_lru.size());
133+
EXPECT_EQ(33u, cache_lru.cost());
134134
EXPECT_FALSE(cache_lru.exists(1));
135135
EXPECT_FALSE(cache_lru.exists(2));
136136
EXPECT_TRUE(cache_lru.exists(3));
137137

138138
cache_lru.put(1, 11);
139-
EXPECT_EQ(11u, cache_lru.size());
139+
EXPECT_EQ(11u, cache_lru.cost());
140140
EXPECT_TRUE(cache_lru.exists(1));
141141
EXPECT_FALSE(cache_lru.exists(2));
142142
EXPECT_FALSE(cache_lru.exists(3));
@@ -166,7 +166,7 @@ TEST(CacheTest1, KeepsAllValuesWithinCapacity) {
166166

167167
EXPECT_RANGE_FULLY_IN_CACHE(cache_lru, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY), NUM_OF_TEST2_RECORDS, 1)
168168

169-
size_t size = cache_lru.size();
169+
size_t size = cache_lru.cost();
170170
EXPECT_EQ(TEST2_CACHE_CAPACITY, size);
171171
}
172172

@@ -177,20 +177,20 @@ TEST(CacheTest1, ChangeCacheCapacity) {
177177
cache_lru.put(i, i);
178178
}
179179

180-
EXPECT_EQ(TEST2_CACHE_CAPACITY, cache_lru.size());
180+
EXPECT_EQ(TEST2_CACHE_CAPACITY, cache_lru.cost());
181181
EXPECT_RANGE_MISSING_FROM_CACHE(cache_lru, 0, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY))
182182
EXPECT_RANGE_FULLY_IN_CACHE(cache_lru, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY), NUM_OF_TEST2_RECORDS, 1)
183183

184-
cache_lru.setMaxSize(TEST2_CACHE_CAPACITY_SMALL);
185-
EXPECT_EQ(TEST2_CACHE_CAPACITY_SMALL, cache_lru.size());
184+
cache_lru.setMaxCost(TEST2_CACHE_CAPACITY_SMALL);
185+
EXPECT_EQ(TEST2_CACHE_CAPACITY_SMALL, cache_lru.cost());
186186
EXPECT_RANGE_MISSING_FROM_CACHE(cache_lru, 0, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY_SMALL))
187187
EXPECT_RANGE_FULLY_IN_CACHE(cache_lru, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY_SMALL), NUM_OF_TEST2_RECORDS, 1)
188188

189-
cache_lru.setMaxSize(TEST2_CACHE_CAPACITY);
189+
cache_lru.setMaxCost(TEST2_CACHE_CAPACITY);
190190
for (int i = 0; i < NUM_OF_TEST2_RECORDS; ++i) {
191191
cache_lru.put(i, 1000*i);
192192
}
193-
EXPECT_EQ(TEST2_CACHE_CAPACITY, cache_lru.size());
193+
EXPECT_EQ(TEST2_CACHE_CAPACITY, cache_lru.cost());
194194
EXPECT_RANGE_MISSING_FROM_CACHE(cache_lru, 0, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY))
195195
EXPECT_RANGE_FULLY_IN_CACHE(cache_lru, (NUM_OF_TEST2_RECORDS - TEST2_CACHE_CAPACITY), NUM_OF_TEST2_RECORDS, 1000)
196196
}

0 commit comments

Comments
 (0)