From 45cce5f5862c3a225abbda8dcf3ed86fa261ca07 Mon Sep 17 00:00:00 2001
From: Qiaolin Yu
Date: Sun, 12 Nov 2023 02:00:16 -0500
Subject: [PATCH] add metadata cache pool

---
 CMakeLists.txt                                 |  2 +-
 include/rocksdb/cache.h                        | 81 +++++++++++++++----
 .../block_based/block_based_table_builder.cc   |  2 +-
 .../block_based/block_based_table_factory.cc   |  4 +-
 table/block_based/block_based_table_reader.cc  | 12 +--
 .../block_based_table_reader_impl.h            |  4 +-
 6 files changed, 79 insertions(+), 26 deletions(-)

diff --git a/CMakeLists.txt b/CMakeLists.txt
index 6643acb08..cac1d9eda 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -353,7 +353,7 @@ if (NOT WITH_PERF_CONTEXT)
   add_definitions(-DNPERF_CONTEXT)
 endif()
 
-option(FAIL_ON_WARNINGS "Treat compile warnings as errors" ON)
+option(FAIL_ON_WARNINGS "Treat compile warnings as errors" OFF)
 if(FAIL_ON_WARNINGS)
   if(MSVC)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /WX")
diff --git a/include/rocksdb/cache.h b/include/rocksdb/cache.h
index 11c7da77e..ba462f72d 100644
--- a/include/rocksdb/cache.h
+++ b/include/rocksdb/cache.h
@@ -294,6 +294,7 @@ extern std::shared_ptr<Cache> NewClockCache(
 
 class Cache {
  public:
+  std::shared_ptr<Cache> metadata_cache;
   // Depending on implementation, cache entries with higher priority levels
   // could be less likely to get evicted than entries with lower priority
   // levels. The "high" priority level applies to certain SST metablocks (e.g.
@@ -421,6 +422,17 @@ class Cache {
                         DeleterFn deleter, Handle** handle = nullptr,
                         Priority priority = Priority::LOW) = 0;
 
+  virtual Status IntelligentInsert(const Slice& key, void* value, size_t charge,
+                                   DeleterFn deleter, Handle** handle = nullptr,
+                                   Priority priority = Priority::LOW,
+                                   bool isDataType = true) {
+    if (isDataType) {
+      return Insert(key, value, charge, deleter, handle, priority);
+    }
+    return metadata_cache->Insert(key, value, charge, deleter, handle,
+                                  priority);
+  }
+
   // If the cache has no mapping for "key", returns nullptr.
   //
   // Else return a handle that corresponds to the mapping. The caller
@@ -430,6 +442,15 @@ class Cache {
   // function.
   virtual Handle* Lookup(const Slice& key, Statistics* stats = nullptr) = 0;
 
+  virtual Handle* IntelligentLookup(const Slice& key,
+                                    Statistics* stats = nullptr,
+                                    bool isDataType = true) {
+    if (isDataType) {
+      return Lookup(key, stats);
+    }
+    return metadata_cache->Lookup(key, stats);
+  }
+
   // Increments the reference count for the handle if it refers to an entry in
   // the cache. Returns true if refcount was incremented; otherwise, returns
   // false.
@@ -439,9 +460,9 @@ class Cache {
   /**
    * Release a mapping returned by a previous Lookup(). A released entry might
    * still remain in cache in case it is later looked up by others. If
-   * erase_if_last_ref is set then it also erases it from the cache if there is
-   * no other reference to it. Erasing it should call the deleter function that
-   * was provided when the entry was inserted.
+   * erase_if_last_ref is set then it also erases it from the cache if there
+   * is no other reference to it. Erasing it should call the deleter function
+   * that was provided when the entry was inserted.
    *
    * Returns true if the entry was also erased.
    */
@@ -455,6 +476,13 @@ class Cache {
   // REQUIRES: handle must have been returned by a method on *this.
   virtual void* Value(Handle* handle) = 0;
 
+  virtual void* IntelligentValue(Handle* handle, bool isDataType = true) {
+    if (isDataType) {
+      return Value(handle);
+    }
+    return metadata_cache->Value(handle);
+  }
+
   // If the cache contains the entry for the key, erase it. Note that the
   // underlying entry will be kept around until all existing handles
   // to it have been released.
@@ -606,11 +634,22 @@ class Cache {
     return Insert(key, value, charge, helper->del_cb, handle, priority);
   }
 
-  // Lookup the key in the primary and secondary caches (if one is configured).
-  // The create_cb callback function object will be used to contruct the
-  // cached object.
-  // If none of the caches have the mapping for the key, returns nullptr.
-  // Else, returns a handle that corresponds to the mapping.
+  virtual Status IntelligentInsert(const Slice& key, void* value,
+                                   const CacheItemHelper* helper, size_t charge,
+                                   Handle** handle = nullptr,
+                                   Priority priority = Priority::LOW,
+                                   bool isDataType = true) {
+    if (isDataType) {
+      return Insert(key, value, helper, charge, handle, priority);
+    }
+    return metadata_cache->Insert(key, value, helper, charge, handle, priority);
+  }
+
+  // Lookup the key in the primary and secondary caches (if one is
+  // configured). The create_cb callback function object will be used to
+  // contruct the cached object. If none of the caches have the mapping for
+  // the key, returns nullptr. Else, returns a handle that corresponds to the
+  // mapping.
   //
   // This call may promote the object from the secondary cache (if one is
   // configured, and has the given key) to the primary cache.
@@ -632,12 +671,13 @@ class Cache {
   // will not block.
   //
   // IMPORTANT: Pending handles are not thread-safe, and only these functions
-  // are allowed on them: Value(), IsReady(), Wait(), WaitAll(). Even Release()
-  // can only come after Wait() or WaitAll() even though a reference is held.
+  // are allowed on them: Value(), IsReady(), Wait(), WaitAll(). Even
+  // Release() can only come after Wait() or WaitAll() even though a reference
+  // is held.
   //
   // Only Wait()/WaitAll() gets a Handle out of a Pending state. (Waiting is
-  // safe and has no effect on other handle states.) After waiting on a Handle,
-  // it is in one of two states:
+  // safe and has no effect on other handle states.) After waiting on a
+  // Handle, it is in one of two states:
   //  * Present - if Value() != nullptr
   //  * Failed - if Value() == nullptr, such as if the secondary cache
   //    initially thought it had the value but actually did not.
@@ -653,6 +693,19 @@ class Cache {
     return Lookup(key, stats);
   }
 
+  virtual Handle* IntelligentLookup(const Slice& key,
+                                    const CacheItemHelper* helper_cb,
+                                    const CreateCallback& create_cb,
+                                    Priority priority, bool wait,
+                                    Statistics* stats = nullptr,
+                                    bool isDataType = true) {
+    if (isDataType) {
+      return Lookup(key, helper_cb, create_cb, priority, wait, stats);
+    }
+    return metadata_cache->Lookup(key, helper_cb, create_cb, priority, wait,
+                                  stats);
+  }
+
   // Release a mapping returned by a previous Lookup(). The "useful"
   // parameter specifies whether the data was actually used or not,
   // which may be used by the cache implementation to decide whether
@@ -691,8 +744,8 @@ class Cache {
 //
 // Developer notes: Adding a new enum to this class requires corresponding
 // updates to `kCacheEntryRoleToCamelString` and
-// `kCacheEntryRoleToHyphenString`. Do not add to this enum after `kMisc` since
-// `kNumCacheEntryRoles` assumes `kMisc` comes last.
+// `kCacheEntryRoleToHyphenString`. Do not add to this enum after `kMisc`
+// since `kNumCacheEntryRoles` assumes `kMisc` comes last.
 enum class CacheEntryRole {
   // Block-based table data block
   kDataBlock,
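The new Cache methods above all dispatch on the trailing isDataType flag: true keeps the existing single-pool behaviour, while false forwards the call to the new public metadata_cache member. A minimal usage sketch follows; it is not part of the patch, and the cache sizes, keys, and no-op deleter are illustrative only.

// Sketch only: exercise the Intelligent* dispatch added in cache.h above.
#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;

namespace {
// No-op deleter for the static payload used below.
void NoopDeleter(const Slice& /*key*/, void* /*value*/) {}
}  // namespace

void MetadataPoolSketch() {
  // Primary pool for data blocks plus a separate pool for metadata blocks.
  std::shared_ptr<Cache> cache = NewLRUCache(8 << 20);
  cache->metadata_cache = NewLRUCache(2 << 20);

  static char index_payload[] = "index-block";

  // isDataType defaults to true (primary pool); passing false routes the
  // entry to the metadata pool instead.
  Status s = cache->IntelligentInsert("index#1", index_payload,
                                      sizeof(index_payload), &NoopDeleter,
                                      /*handle=*/nullptr, Cache::Priority::HIGH,
                                      /*isDataType=*/false);
  if (!s.ok()) {
    return;
  }

  Cache::Handle* h = cache->IntelligentLookup("index#1", /*stats=*/nullptr,
                                              /*isDataType=*/false);
  if (h != nullptr) {
    void* value = cache->IntelligentValue(h, /*isDataType=*/false);
    (void)value;
    // The handle was handed out by the metadata pool, so release it there.
    cache->metadata_cache->Release(h);
  }
}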
diff --git a/table/block_based/block_based_table_builder.cc b/table/block_based/block_based_table_builder.cc
index c79a44830..a4dc681b7 100644
--- a/table/block_based/block_based_table_builder.cc
+++ b/table/block_based/block_based_table_builder.cc
@@ -1521,7 +1521,7 @@ Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents,
 
     assert(block_holder->own_bytes());
     size_t charge = block_holder->ApproximateMemoryUsage();
-    s = block_cache->Insert(
+    s = block_cache->IntelligentInsert(
         key.AsSlice(), block_holder.get(),
         BlocklikeTraits<TBlocklike>::GetCacheItemHelper(block_type), charge,
         nullptr, Cache::Priority::LOW);
diff --git a/table/block_based/block_based_table_factory.cc b/table/block_based/block_based_table_factory.cc
index 09c1d2f62..bada016d2 100644
--- a/table/block_based/block_based_table_factory.cc
+++ b/table/block_based/block_based_table_factory.cc
@@ -550,9 +550,9 @@ Status CheckCacheOptionCompatibility(const BlockBasedTableOptions& bbto) {
   // If we get something different from what we inserted, that indicates
   // dangerously overlapping key spaces.
   if (bbto.block_cache) {
-    auto handle = bbto.block_cache->Lookup(sentinel_key.AsSlice());
+    auto handle = bbto.block_cache->IntelligentLookup(sentinel_key.AsSlice());
    if (handle) {
-      auto v = static_cast<char*>(bbto.block_cache->Value(handle));
+      auto v = static_cast<char*>(bbto.block_cache->IntelligentValue(handle));
       char c = *v;
       bbto.block_cache->Release(handle);
       if (v == &kCompressedBlockCacheMarker) {
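The updated call sites in the builder and factory keep the default isDataType == true, so every block still lands in the primary pool unless a caller passes the flag explicitly. A hypothetical helper for deriving the flag from the block type is sketched below; the helper name and the include path are assumptions, not part of the patch.

// Hypothetical routing helper; BlockType lives in an internal RocksDB header
// whose path is assumed here.
#include "table/block_based/block_type.h"

namespace ROCKSDB_NAMESPACE {

// Treat only plain data blocks as "data"; index, filter, and the other
// metadata block kinds would then be steered to the metadata pool by passing
// the returned value as the isDataType argument.
inline bool IsDataTypeForCacheRouting(BlockType block_type) {
  return block_type == BlockType::kData;
}

}  // namespace ROCKSDB_NAMESPACE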
diff --git a/table/block_based/block_based_table_reader.cc b/table/block_based/block_based_table_reader.cc
index d1911a432..b5a905b54 100644
--- a/table/block_based/block_based_table_reader.cc
+++ b/table/block_based/block_based_table_reader.cc
@@ -384,16 +384,16 @@ Cache::Handle* BlockBasedTable::GetEntryFromCache(
     const Cache::CreateCallback& create_cb, Cache::Priority priority) const {
   Cache::Handle* cache_handle = nullptr;
   if (cache_tier == CacheTier::kNonVolatileBlockTier) {
-    cache_handle = block_cache->Lookup(key, cache_helper, create_cb, priority,
+    cache_handle = block_cache->IntelligentLookup(key, cache_helper, create_cb, priority,
                                        wait, rep_->ioptions.statistics.get());
   } else {
-    cache_handle = block_cache->Lookup(key, rep_->ioptions.statistics.get());
+    cache_handle = block_cache->IntelligentLookup(key, rep_->ioptions.statistics.get());
   }
 
   // Avoid updating metrics here if the handle is not complete yet. This
   // happens with MultiGet and secondary cache. So update the metrics only
   // if its a miss, or a hit and value is ready
-  if (!cache_handle || block_cache->Value(cache_handle)) {
+  if (!cache_handle || block_cache->IntelligentValue(cache_handle)) {
     if (cache_handle != nullptr) {
       UpdateCacheHitMetrics(block_type, get_context,
                             block_cache->GetUsage(cache_handle));
@@ -413,10 +413,10 @@ Status BlockBasedTable::InsertEntryToCache(
     Cache::Handle** cache_handle, Cache::Priority priority) const {
   Status s = Status::OK();
   if (cache_tier == CacheTier::kNonVolatileBlockTier) {
-    s = block_cache->Insert(key, block_holder.get(), cache_helper, charge,
+    s = block_cache->IntelligentInsert(key, block_holder.get(), cache_helper, charge,
                             cache_handle, priority);
   } else {
-    s = block_cache->Insert(key, block_holder.get(), charge,
+    s = block_cache->IntelligentInsert(key, block_holder.get(), charge,
                             cache_helper->del_cb, cache_handle, priority);
   }
   if (s.ok()) {
@@ -1316,7 +1316,7 @@ Status BlockBasedTable::GetDataBlockFromCache(
         priority);
     if (cache_handle != nullptr) {
       out_parsed_block->SetCachedValue(
-          reinterpret_cast<TBlocklike*>(block_cache->Value(cache_handle)),
+          reinterpret_cast<TBlocklike*>(block_cache->IntelligentValue(cache_handle)),
           block_cache, cache_handle);
       return s;
     }
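The reader paths above rely on the defaulted base-class methods, which dereference metadata_cache as soon as a caller passes isDataType == false. A defensive free-function sketch is shown below; it is not part of the patch and only mirrors Cache::IntelligentInsert with a null check added so that an unset pool falls back to the primary cache.

// Guarded dispatch sketch (not in the patch).
#include "rocksdb/cache.h"

using namespace ROCKSDB_NAMESPACE;

Status GuardedIntelligentInsert(Cache& cache, const Slice& key, void* value,
                                size_t charge, Cache::DeleterFn deleter,
                                Cache::Handle** handle = nullptr,
                                Cache::Priority priority = Cache::Priority::LOW,
                                bool is_data_type = true) {
  // Fall back to the primary pool if no metadata pool has been attached,
  // instead of dereferencing an empty metadata_cache.
  if (is_data_type || cache.metadata_cache == nullptr) {
    return cache.Insert(key, value, charge, deleter, handle, priority);
  }
  return cache.metadata_cache->Insert(key, value, charge, deleter, handle,
                                      priority);
}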
diff --git a/table/block_based/block_based_table_reader_impl.h b/table/block_based/block_based_table_reader_impl.h
index d877a375a..894e33845 100644
--- a/table/block_based/block_based_table_reader_impl.h
+++ b/table/block_based/block_based_table_reader_impl.h
@@ -93,7 +93,7 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(
       // insert a dummy record to block cache to track the memory usage
       Cache::Handle* cache_handle = nullptr;
       CacheKey key = CacheKey::CreateUniqueForCacheLifetime(block_cache);
-      s = block_cache->Insert(key.AsSlice(), nullptr,
+      s = block_cache->IntelligentInsert(key.AsSlice(), nullptr,
                               block.GetValue()->ApproximateMemoryUsage(),
                               nullptr, &cache_handle);
 
@@ -151,7 +151,7 @@ TBlockIter* BlockBasedTable::NewDataBlockIterator(const ReadOptions& ro,
       // insert a dummy record to block cache to track the memory usage
       Cache::Handle* cache_handle = nullptr;
       CacheKey key = CacheKey::CreateUniqueForCacheLifetime(block_cache);
-      s = block_cache->Insert(key.AsSlice(), nullptr,
+      s = block_cache->IntelligentInsert(key.AsSlice(), nullptr,
                               block.GetValue()->ApproximateMemoryUsage(),
                               nullptr, &cache_handle);
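Finally, an end-to-end wiring sketch (pool sizes and option values are illustrative, not part of the patch): the metadata pool is attached to the same block cache object that the table factory, builder, and readers above receive, so the Intelligent* calls can reach it through the Cache pointer they already hold.

// Wiring sketch only.
#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

using namespace ROCKSDB_NAMESPACE;

Options MakeOptionsWithMetadataPool() {
  BlockBasedTableOptions bbto;
  bbto.block_cache = NewLRUCache(512 << 20);                 // data blocks
  bbto.block_cache->metadata_cache = NewLRUCache(64 << 20);  // index/filter
  bbto.cache_index_and_filter_blocks = true;

  Options options;
  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
  return options;
}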