Skip to content

Commit 05b606a

Browse files
Added logging to ConcurrentCache
1 parent 4ab3061 commit 05b606a

File tree

2 files changed

+41
-1
lines changed

2 files changed

+41
-1
lines changed

src/concurrent_cache.h

+11-1
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@
2222
#define ZIM_CONCURRENT_CACHE_H
2323

2424
#include "lrucache.h"
25+
#include "log.h"
2526

2627
#include <chrono>
2728
#include <cstddef>
@@ -88,15 +89,21 @@ class ConcurrentCache: private lru_cache<Key, std::shared_future<Value>, FutureT
8889
template<class F>
8990
Value getOrPut(const Key& key, F f)
9091
{
92+
log_debug_func_call("ConcurrentCache::getOrPut", key);
93+
9194
std::promise<Value> valuePromise;
9295
std::unique_lock<std::mutex> l(lock_);
9396
auto shared_future = valuePromise.get_future().share();
9497
const auto x = Impl::getOrPut(key, shared_future);
9598
l.unlock();
99+
log_debug("Obtained the cache slot");
96100
if ( x.miss() ) {
101+
log_debug("It was a cache miss. Going to obtain the value...");
97102
try {
98103
valuePromise.set_value(f());
104+
log_debug("Value was successfully obtained. Computing its cost...");
99105
auto cost = CostEstimation::cost(x.value().get());
106+
log_debug("cost=" << cost << ". Committing to cache...");
100107
// There is a small window when the valuePromise may be drop from lru cache after
101108
// we set the value but before we increase the size of the cache.
102109
// In this case we decrease the size of `cost` before increasing it.
@@ -117,17 +124,20 @@ class ConcurrentCache: private lru_cache<Key, std::shared_future<Value>, FutureT
117124
Impl::increaseCost(cost);
118125
}
119126
}
127+
log_debug("Done. Cache cost is at " << getCurrentCost() );
120128
} catch (std::exception& e) {
129+
log_debug("Evaluation failed. Releasing the cache slot...");
121130
drop(key);
122131
throw;
123132
}
124133
}
125134

126-
return x.value().get();
135+
return log_debug_return_value(x.value().get());
127136
}
128137

129138
bool drop(const Key& key)
130139
{
140+
log_debug_func_call("ConcurrentCache::drop", key);
131141
std::unique_lock<std::mutex> l(lock_);
132142
return Impl::drop(key);
133143
}

test/concurrentcache.cpp

+30
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,8 @@
1717
*
1818
*/
1919

20+
#define LIBZIM_ENABLE_LOGGING
21+
2022
#include "concurrent_cache.h"
2123
#include "gtest/gtest.h"
2224

@@ -38,3 +40,31 @@ TEST(ConcurrentCacheTest, handleException) {
3840
EXPECT_THROW(cache.getOrPut(8, ExceptionSource()), std::runtime_error);
3941
EXPECT_EQ(cache.getOrPut(8, LazyValue(888)), 888);
4042
}
43+
44+
// White-box ("x-ray") test: drives ConcurrentCache through a cache miss,
// a cache hit, and a failed value computation, then compares the captured
// debug log against the exact expected trace.
// NOTE(review): statement order matters -- the expected log below mirrors
// the call sequence one-to-one, so do not reorder the cache operations.
TEST(ConcurrentCacheXRayTest, simpleFlow) {
  // Redirect logging into an in-memory buffer so the trace can be asserted on.
  zim::Logging::logIntoMemory();

  zim::ConcurrentCache<int, int, zim::UnitCostEstimation> xrayedCache(10);

  // Miss: key 3 is absent, so LazyValue(2025) is evaluated and stored.
  EXPECT_EQ(xrayedCache.getOrPut(3, LazyValue(2025)), 2025);
  // Hit: the cached 2025 is returned; LazyValue(123) must not be evaluated.
  EXPECT_EQ(xrayedCache.getOrPut(3, LazyValue(123)), 2025);
  // Failure: the evaluator throws, the exception propagates to the caller
  // and the provisional slot for key 2 is dropped again.
  EXPECT_THROW(xrayedCache.getOrPut(2, ExceptionSource()), std::runtime_error);

  // The raw string below is the exact expected trace, byte for byte.
  ASSERT_EQ(zim::Logging::getInMemLogContent(),
R"(thread#0: ConcurrentCache::getOrPut(3) {
thread#0: Obtained the cache slot
thread#0: It was a cache miss. Going to obtain the value...
thread#0: Value was successfully obtained. Computing its cost...
thread#0: cost=1. Committing to cache...
thread#0: Done. Cache cost is at 1
thread#0: } (return value: 2025)
thread#0: ConcurrentCache::getOrPut(3) {
thread#0: Obtained the cache slot
thread#0: } (return value: 2025)
thread#0: ConcurrentCache::getOrPut(2) {
thread#0: Obtained the cache slot
thread#0: It was a cache miss. Going to obtain the value...
thread#0: Evaluation failed. Releasing the cache slot...
thread#0: ConcurrentCache::drop(2) {
thread#0: } (return value: )
thread#0: } (return value: )
)");
}

0 commit comments

Comments
 (0)