 namespace zim
 {
 
-template <typename CostEstimation>
-struct FutureToValueCostEstimation {
-  template <typename T>
-  static size_t cost(const std::shared_future<T>& future) {
-    // The future is the value in the cache.
-    // When calling getOrPut, if the key is not in the cache,
-    // we add a future and then we compute the value and set the future.
-    // But lru_cache calls us when we add the future, i.e. before we have
-    // computed the value. If we wait here (or use future.get()), we will
-    // deadlock, as we need to return before setting the value.
-    // So in this case we return 0. `ConcurrentCache::getOrPut` will correctly
-    // increase the current cache size once it has an actual value.
-    // We still need to compute the size of the value when the future is ready,
-    // as it is also used to decrease the cache size when the value is dropped.
-    std::future_status status = future.wait_for(std::chrono::nanoseconds::zero());
-    if (status == std::future_status::ready) {
-      return CostEstimation::cost(future.get());
-    } else {
-      return 0;
-    }
-  }
-};
-
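Review note: the removed struct relies on a non-blocking readiness probe: `wait_for` with a zero timeout reports whether the `shared_future` already holds a value, without ever blocking on `get()`. A minimal standalone sketch of that probe (illustration only, not part of this change):

```cpp
#include <chrono>
#include <future>

int main() {
  std::promise<int> p;
  std::shared_future<int> f = p.get_future().share();

  // No value set yet: a zero-timeout wait reports timeout, not ready, so a
  // cost estimator can safely return 0 instead of deadlocking on f.get().
  bool ready = f.wait_for(std::chrono::nanoseconds::zero())
                 == std::future_status::ready;   // false here

  p.set_value(7);
  ready = f.wait_for(std::chrono::nanoseconds::zero())
            == std::future_status::ready;        // true now; f.get() == 7
  return ready ? 0 : 1;
}
```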
 /**
    ConcurrentCache implements a concurrent thread-safe cache
@@ -65,15 +41,15 @@ struct FutureToValueCostEstimation {
    available.
 */
 template <typename Key, typename Value, typename CostEstimation>
-class ConcurrentCache : private lru_cache<Key, std::shared_future<Value>, FutureToValueCostEstimation<CostEstimation>>
+class ConcurrentMemLimitedCache
 {
 private: // types
-  typedef std::shared_future<Value> ValuePlaceholder;
-  typedef lru_cache<Key, ValuePlaceholder, FutureToValueCostEstimation<CostEstimation>> Impl;
+  typedef std::shared_future<std::pair<size_t, Value>> ValuePlaceholder;
+  typedef lru_cache<Key, ValuePlaceholder> Impl;
 
 public: // types
-  explicit ConcurrentCache(size_t maxCost)
-    : Impl(maxCost)
+  explicit ConcurrentMemLimitedCache(size_t maxCost)
+    : maxCost(maxCost), currentCost(0), impl_((size_t)-1)
   {}
 
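Note for reviewers: `impl_` is now constructed with a capacity of `(size_t)-1`, so the inner `lru_cache` never evicts on its own; eviction is driven entirely by the cost bookkeeping in this class. For readers unfamiliar with the `CostEstimation` parameter, a hypothetical policy and instantiation (`StringCost` is an example name, not part of this patch):

```cpp
#include <cstddef>
#include <string>

// Hypothetical policy: charge each cached string by its length in bytes.
struct StringCost {
  static size_t cost(const std::string& s) { return s.size(); }
};

// A cache bounded by ~1 MiB of string payload, whatever the entry count.
zim::ConcurrentMemLimitedCache<int, std::string, StringCost> cache(1024 * 1024);
```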
   // Gets the entry corresponding to the given key. If the entry is not in the
@@ -88,63 +64,102 @@ class ConcurrentCache: private lru_cache<Key, std::shared_future<Value>, FutureT
   template<class F>
   Value getOrPut(const Key& key, F f)
   {
-    std::promise<Value> valuePromise;
+    std::promise<std::pair<size_t, Value>> valuePromise;
     std::unique_lock<std::mutex> l(lock_);
-    const auto x = Impl::getOrPut(key, valuePromise.get_future().share());
+    const auto x = impl_.getOrPut(key, valuePromise.get_future().share());
     l.unlock();
     if ( x.miss() ) {
       try {
-        valuePromise.set_value(f());
-        auto cost = CostEstimation::cost(x.value().get());
+        auto item = f();
+        auto cost = CostEstimation::cost(item);
+        valuePromise.set_value(std::make_pair(cost, item));
         // There is a small window during which the valuePromise may be dropped
         // from the lru cache after we set the value but before we increase the
         // size of the cache. In that case the size has already been decreased
         // by `cost` before we increase it.
         // First of all, this should be pretty rare: we have just put the
         // future in the cache, so it should not be the least recently used item.
         // If it happens, it is not a problem as long as the current size is
         // bigger than `cost` (most of the time).
-        // For the really rare specific case of current cach size being lower than `cost` (if possible),
+        // For the really rare specific case of the current cache size being lower than `cost` (if possible),
         // `decreaseCost` will clamp the new size to 0.
         {
           std::unique_lock<std::mutex> l(lock_);
-          Impl::increaseCost(cost);
+          increaseCost(cost);
         }
       } catch (std::exception& e) {
         drop(key);
         throw;
       }
     }
 
-    return x.value().get();
+    return std::get<1>(x.value().get());
   }
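The promise/shared-future dance gives single-flight semantics: the first thread to miss installs a placeholder future and computes the value; concurrent callers for the same key receive the same future from `impl_` and block in `x.value().get()` until it is set. A hypothetical usage sketch, reusing the `cache` instance from the earlier note:

```cpp
// f runs at most once per miss, even under concurrency; other threads
// asking for key 42 wait on the shared future for the same result.
std::string v = cache.getOrPut(42, [] {
  return std::string("expensive result");
});
```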
 
-  bool drop(const Key& key)
+  void drop(const Key& key)
   {
     std::unique_lock<std::mutex> l(lock_);
-    return Impl::drop(key);
+    auto dropped = impl_.drop(key);
+    currentCost -= get_future_cost(dropped);
   }
 
   template<class F>
   void dropAll(F f) {
     std::unique_lock<std::mutex> l(lock_);
-    Impl::dropAll(f);
+    for (auto dropped : impl_.dropAll(f)) {
+      currentCost -= get_future_cost(dropped);
+    }
   }
 
   size_t getMaxCost() const {
     std::unique_lock<std::mutex> l(lock_);
-    return Impl::getMaxCost();
+    return maxCost;
   }
 
   size_t getCurrentCost() const {
     std::unique_lock<std::mutex> l(lock_);
-    return Impl::cost();
+    return currentCost;
   }
 
-  void setMaxCost(size_t newSize) {
+  void setMaxCost(size_t newMaxCost) {
     std::unique_lock<std::mutex> l(lock_);
-    return Impl::setMaxCost(newSize);
+    maxCost = newMaxCost;
+    while (currentCost > maxCost) {
+      auto dropped = impl_.dropLast();
+      currentCost -= get_future_cost(dropped);
+    }
+  }
+
+private:
+  static size_t get_future_cost(ValuePlaceholder& future) {
+    // The future is the value stored in the cache.
+    // When calling getOrPut, if the key is not in the cache, we add a future
+    // and only then compute the value and set it, so this helper may run
+    // before the value has been computed. If we wait here (or use
+    // future.get()), we will deadlock, as we need to return before the value
+    // can be set.
+    // So in that case we return 0. `ConcurrentMemLimitedCache::getOrPut` will
+    // correctly increase the current cache size once it has an actual value.
+    // We still need to return the size of the value when the future is ready,
+    // as it is also used to decrease the cache size when the value is dropped.
+    std::future_status status = future.wait_for(std::chrono::nanoseconds::zero());
+    if (status == std::future_status::ready) {
+      return std::get<0>(future.get());
+    } else {
+      return 0;
+    }
+  }
+
+  void increaseCost(size_t newCost) {
+    currentCost += newCost;
+    while (currentCost > maxCost && impl_.size() > 1) {
+      auto dropped = impl_.dropLast();
+      currentCost -= std::get<0>(dropped.get());
+    }
   }
 
 private: // data
+  size_t maxCost;
+  size_t currentCost;
+  mutable Impl impl_;
   mutable std::mutex lock_;
 };
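Two behavioral notes on the eviction paths. `increaseCost` evicts while `currentCost > maxCost && impl_.size() > 1`, so a single entry costing more than `maxCost` survives (the guard stops it from evicting the entry that was just inserted), whereas `setMaxCost` has no such guard and shrinks until the budget is met. Also, `increaseCost` reads the evicted entry with a blocking `dropped.get()` rather than the non-blocking `get_future_cost`, which appears to assume the LRU victim always has its value set already. A hypothetical walk-through, assuming the `StringCost` policy from the earlier sketch and that `impl_.dropLast()` evicts the least recently used entry:

```cpp
zim::ConcurrentMemLimitedCache<int, std::string, StringCost> c(10);
c.getOrPut(1, [] { return std::string(6, 'a'); });  // currentCost = 6
c.getOrPut(2, [] { return std::string(6, 'b'); });  // 12 > 10: key 1 evicted -> 6
c.getOrPut(3, [] { return std::string(20, 'c'); }); // 26 > 10: key 2 evicted -> 20;
                                                    // the size() > 1 guard keeps
                                                    // this oversized entry alive
c.setMaxCost(10);                                   // no guard here: drops it -> 0
```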