From 46405e109a923965fc66c916332331ebd043e896 Mon Sep 17 00:00:00 2001
From: Andrei Strelkovskii
Date: Tue, 23 Apr 2024 02:39:54 +0200
Subject: [PATCH] issue-852: detecting sequential access pattern and caching results not only per node but per handle as well (#1029)

* issue-852: detecting sequential access pattern and caching results not only per node but per handle as well

* issue-852: detecting sequential access pattern and caching results not only per node but per handle as well - improved ut
---
 .../libs/storage/tablet/model/read_ahead.cpp  |  86 ++++++--
 .../libs/storage/tablet/model/read_ahead.h    |  34 ++-
 .../storage/tablet/model/read_ahead_ut.cpp    | 201 +++++++++++++++++-
 .../storage/tablet/tablet_actor_readdata.cpp  |  17 +-
 .../libs/storage/tablet/tablet_state.h        |   3 +
 .../libs/storage/tablet/tablet_state_data.cpp |   9 +-
 .../storage/tablet/tablet_state_sessions.cpp  |   4 +
 7 files changed, 317 insertions(+), 37 deletions(-)

diff --git a/cloud/filestore/libs/storage/tablet/model/read_ahead.cpp b/cloud/filestore/libs/storage/tablet/model/read_ahead.cpp
index 88f3e37ad4e..4dbbfa50883 100644
--- a/cloud/filestore/libs/storage/tablet/model/read_ahead.cpp
+++ b/cloud/filestore/libs/storage/tablet/model/read_ahead.cpp
@@ -6,6 +6,10 @@ namespace {
 
 ////////////////////////////////////////////////////////////////////////////////
 
+constexpr ui64 NoHandle = Max<ui64>();
+
+////////////////////////////////////////////////////////////////////////////////
+
 bool IsCloseToSequential(
     const TRingBuffer<TRange>& byteRanges,
     ui32 maxGapPercentage)
@@ -69,8 +73,11 @@ void TReadAheadCache::Reset(
     NodeStates.clear();
 }
 
-bool TReadAheadCache::TryFillResult(
+////////////////////////////////////////////////////////////////////////////////
+
+bool TReadAheadCache::TryFillResultImpl(
     ui64 nodeId,
+    ui64 handle,
     const TByteRange& range,
     NProtoPrivate::TDescribeDataResponse* response)
 {
@@ -79,8 +86,13 @@ bool TReadAheadCache::TryFillResult(
         return false;
     }
 
-    for (ui32 i = 0; i < nodeState->DescribeResults.Size(); ++i) {
-        const auto& result = nodeState->DescribeResults.Back(i);
+    auto* handleState = nodeState->HandleStates.FindPtr(handle);
+    if (!handleState) {
+        return false;
+    }
+
+    for (ui32 i = 0; i < handleState->DescribeResults.Size(); ++i) {
+        const auto& result = handleState->DescribeResults.Back(i);
         if (result.Range.Contains(range)) {
             FilterResult(range, result.Response, response);
             return true;
@@ -90,17 +102,29 @@ bool TReadAheadCache::TryFillResult(
     return false;
 }
 
-TMaybe<TByteRange> TReadAheadCache::RegisterDescribe(
+bool TReadAheadCache::TryFillResult(
     ui64 nodeId,
-    const TByteRange inputRange)
+    ui64 handle,
+    const TByteRange& range,
+    NProtoPrivate::TDescribeDataResponse* response)
 {
-    if (!RangeSize) {
-        return {};
+    if (TryFillResultImpl(nodeId, handle, range, response)) {
+        return true;
     }
 
-    auto& nodeState = Access(nodeId);
-    nodeState.LastRanges.PushBack(TRange(inputRange));
-    if (IsCloseToSequential(nodeState.LastRanges, MaxGapPercentage)
+    return TryFillResultImpl(nodeId, NoHandle, range, response);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+TMaybe<TByteRange> TReadAheadCache::RegisterDescribeImpl(
+    ui64 nodeId,
+    ui64 handle,
+    const TByteRange inputRange)
+{
+    auto& handleState = Access(nodeId, handle);
+    handleState.LastRanges.PushBack(TRange(inputRange));
+    if (IsCloseToSequential(handleState.LastRanges, MaxGapPercentage)
         && inputRange.Length < RangeSize)
     {
         return TByteRange(inputRange.Offset, RangeSize, inputRange.BlockSize);
@@ -109,32 +133,62 @@ TMaybe<TByteRange> TReadAheadCache::RegisterDescribe(
     return {};
 }
 
+TMaybe<TByteRange> TReadAheadCache::RegisterDescribe(
+    ui64 nodeId,
+    ui64 handle,
+    const TByteRange inputRange)
+{
+    if (!RangeSize) {
+        return {};
+    }
+
+    auto result = RegisterDescribeImpl(nodeId, handle, inputRange);
+    if (result) {
+        return result;
+    }
+
+    return RegisterDescribeImpl(nodeId, NoHandle, inputRange);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 void TReadAheadCache::InvalidateCache(ui64 nodeId)
 {
     NodeStates.clear(nodeId);
 }
 
+void TReadAheadCache::OnDestroyHandle(ui64 nodeId, ui64 handle)
+{
+    if (auto* nodeState = NodeStates.FindPtr(nodeId)) {
+        nodeState->HandleStates.erase(handle);
+    }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
 void TReadAheadCache::RegisterResult(
     ui64 nodeId,
+    ui64 handle,
     const TByteRange& range,
     const NProtoPrivate::TDescribeDataResponse& result)
 {
-    Access(nodeId).DescribeResults.PushBack({TRange(range), result});
+    Access(nodeId, handle).DescribeResults.PushBack({TRange(range), result});
+    Access(nodeId, NoHandle).DescribeResults.PushBack({TRange(range), result});
 }
 
-TReadAheadCache::TNodeState& TReadAheadCache::Access(ui64 nodeId)
+TReadAheadCache::THandleState& TReadAheadCache::Access(ui64 nodeId, ui64 handle)
 {
     // TODO: LRU eviction
     if (NodeStates.size() >= MaxNodes && !NodeStates.contains(nodeId)) {
         NodeStates.clear();
     }
 
-    auto& nodeState = NodeStates[nodeId];
-    if (!nodeState.DescribeResults.Capacity()) {
-        nodeState.DescribeResults.Reset(MaxResultsPerNode);
+    auto& handleState = NodeStates[nodeId].HandleStates[handle];
+    if (!handleState.DescribeResults.Capacity()) {
+        handleState.DescribeResults.Reset(MaxResultsPerNode);
     }
 
-    return nodeState;
+    return handleState;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/cloud/filestore/libs/storage/tablet/model/read_ahead.h b/cloud/filestore/libs/storage/tablet/model/read_ahead.h
index fed4dd3d85c..d5d765d5203 100644
--- a/cloud/filestore/libs/storage/tablet/model/read_ahead.h
+++ b/cloud/filestore/libs/storage/tablet/model/read_ahead.h
@@ -63,19 +63,24 @@ class TReadAheadCache
     using TDescribeResults = TRingBuffer<TDescribeResult>;
     using TByteRanges = TRingBuffer<TRange>;
 
-    struct TNodeState
+    struct THandleState
     {
         static const ui32 RANGE_COUNT = 32;
 
         TByteRanges LastRanges;
         TDescribeResults DescribeResults;
 
-        TNodeState()
+        THandleState()
             : LastRanges(RANGE_COUNT)
             , DescribeResults(0)
         {
         }
     };
 
+    struct TNodeState
+    {
+        THashMap<ui64, THandleState> HandleStates;
+    };
+
     using TNodeStates = THashMap<ui64, TNodeState>;
 
 private:
@@ -88,7 +93,7 @@ class TReadAheadCache
     ui32 MaxGapPercentage = 0;
 
 public:
-    TReadAheadCache(IAllocator* allocator);
+    explicit TReadAheadCache(IAllocator* allocator);
     ~TReadAheadCache();
 
     void Reset(
@@ -99,16 +104,20 @@ class TReadAheadCache
 
     bool TryFillResult(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange& range,
         NProtoPrivate::TDescribeDataResponse* response);
     // returns the suggested range to describe
     TMaybe<TByteRange> RegisterDescribe(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange inputRange);
     void InvalidateCache(ui64 nodeId);
+    void OnDestroyHandle(ui64 nodeId, ui64 handle);
     void RegisterResult(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange& range,
         const NProtoPrivate::TDescribeDataResponse& result);
 
@@ -118,7 +127,24 @@ class TReadAheadCache
     }
 
 private:
-    TNodeState& Access(ui64 nodeId);
+    THandleState& Access(ui64 nodeId, ui64 handle);
+
+    bool TryFillResultImpl(
+        ui64 nodeId,
+        ui64 handle,
+        const TByteRange& range,
+        NProtoPrivate::TDescribeDataResponse* response);
+
+    TMaybe<TByteRange> RegisterDescribeImpl(
+        ui64 nodeId,
+        ui64 handle,
+        const TByteRange inputRange);
+
+    void RegisterResultImpl(
+        ui64 nodeId,
+        ui64 handle,
+        const TByteRange& range,
+        const NProtoPrivate::TDescribeDataResponse& result);
 };
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/cloud/filestore/libs/storage/tablet/model/read_ahead_ut.cpp b/cloud/filestore/libs/storage/tablet/model/read_ahead_ut.cpp
index 7e87ec526d9..8fcbc6f29e0 100644
--- a/cloud/filestore/libs/storage/tablet/model/read_ahead_ut.cpp
+++ b/cloud/filestore/libs/storage/tablet/model/read_ahead_ut.cpp
@@ -27,12 +27,21 @@ struct TDefaultCache: TReadAheadCache
 
 ////////////////////////////////////////////////////////////////////////////////
 
+constexpr ui64 Handle1 = 1;
+constexpr ui64 Handle2 = 2;
+constexpr ui64 Handle3 = 3;
+
 TByteRange MakeRange(ui64 offset, ui32 len)
 {
     return TByteRange(offset, len, 4_KB);
 }
 
-void RegisterResult(TDefaultCache& cache, ui64 nodeId, ui64 offset, ui32 len)
+void RegisterResult(
+    TReadAheadCache& cache,
+    ui64 nodeId,
+    ui64 handle,
+    ui64 offset,
+    ui32 len)
 {
     NProtoPrivate::TDescribeDataResponse result;
     auto* p = result.AddBlobPieces();
@@ -45,8 +54,13 @@ void RegisterResult(TDefaultCache& cache, ui64 nodeId, ui64 offset, ui32 len)
     range->SetLength(len);
     range->SetBlobOffset(0);
 
-    cache.RegisterResult(nodeId, MakeRange(offset, len), result);
-};
+    cache.RegisterResult(nodeId, handle, MakeRange(offset, len), result);
+}
+
+void RegisterResult(TReadAheadCache& cache, ui64 nodeId, ui64 offset, ui32 len)
+{
+    return RegisterResult(cache, nodeId, Handle1, offset, len);
+}
 
 TString Expected(
     ui64 nodeId,
@@ -75,10 +89,20 @@ TString Expected(ui64 nodeId, ui64 offset, ui32 len, ui32 blobOffset)
     return Expected(nodeId, offset, len, 1, 2, 3, 1, blobOffset);
 }
 
-TString FillResult(TDefaultCache& cache, ui64 nodeId, ui64 offset, ui32 len)
+TString FillResult(
+    TReadAheadCache& cache,
+    ui64 nodeId,
+    ui64 handle,
+    ui64 offset,
+    ui32 len)
 {
     NProtoPrivate::TDescribeDataResponse result;
-    if (cache.TryFillResult(nodeId, MakeRange(offset, len), &result)) {
+    const bool filled = cache.TryFillResult(
+        nodeId,
+        handle,
+        MakeRange(offset, len), &result);
+
+    if (filled) {
         const auto& bps = result.GetBlobPieces();
         UNIT_ASSERT_VALUES_EQUAL(1, bps.size());
         const auto& branges = bps[0].GetRanges();
@@ -93,8 +117,14 @@ TString FillResult(TDefaultCache& cache, ui64 nodeId, ui64 offset, ui32 len)
             bps[0].GetBSGroupId(),
             branges[0].GetBlobOffset());
     }
+
     return {};
-};
+}
+
+TString FillResult(TReadAheadCache& cache, ui64 nodeId, ui64 offset, ui32 len)
+{
+    return FillResult(cache, nodeId, Handle1, offset, len);
+}
 
 } // namespace
 
@@ -115,6 +145,7 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
         while (offset < 1_MB - requestSize) {
             r = cache.RegisterDescribe(
                 nodeId,
+                Handle1,
                 TByteRange(offset, requestSize, blockSize));
             UNIT_ASSERT_C(!r, r.GetRef().Describe());
             offset += requestSize;
@@ -123,6 +154,7 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
         while (offset < 10_MB) {
             r = cache.RegisterDescribe(
                 nodeId,
+                Handle1,
                 TByteRange(offset, requestSize, blockSize));
             UNIT_ASSERT(r);
             UNIT_ASSERT_VALUES_EQUAL(
@@ -133,6 +165,7 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
 
         r = cache.RegisterDescribe(
             nodeId,
+            Handle1,
             TByteRange(100_MB, requestSize, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
     }
@@ -144,25 +177,42 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
         TDefaultCache cache;
 
         TMaybe<TByteRange> r;
-        r = cache.RegisterDescribe(nodeId, TByteRange(0, 128_KB, blockSize));
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(0, 128_KB, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
-        r = cache.RegisterDescribe(nodeId, TByteRange(128_KB, 128_KB, blockSize));
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(128_KB, 128_KB, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
-        r = cache.RegisterDescribe(nodeId, TByteRange(512_KB, 256_KB, blockSize));
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(512_KB, 256_KB, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
-        r = cache.RegisterDescribe(nodeId, TByteRange(384_KB, 128_KB, blockSize));
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(384_KB, 128_KB, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
-        r = cache.RegisterDescribe(nodeId, TByteRange(768_KB, 256_KB, blockSize));
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(768_KB, 256_KB, blockSize));
         UNIT_ASSERT(r);
         UNIT_ASSERT_VALUES_EQUAL(
             TByteRange(768_KB, 1_MB, blockSize).Describe(),
             r->Describe());
         r = cache.RegisterDescribe(
             nodeId,
+            Handle1,
             TByteRange(1_MB + 256_KB, 256_KB, blockSize));
         UNIT_ASSERT_C(!r, r.GetRef().Describe());
         r = cache.RegisterDescribe(
             nodeId,
+            Handle1,
             TByteRange(1_MB + 512_KB, 384_KB, blockSize));
         UNIT_ASSERT(r);
         UNIT_ASSERT_VALUES_EQUAL(
@@ -170,6 +220,48 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
             r->Describe());
     }
 
+    Y_UNIT_TEST(ShouldDetectPureSequentialReadOver2Handles)
+    {
+        const ui64 nodeId = 111;
+        const ui32 blockSize = 4_KB;
+        const ui32 requestSize = 32 * blockSize;
+
+        TDefaultCache cache;
+
+        TMaybe<TByteRange> r;
+        TVector<ui64> handles = {Handle1, Handle2};
+        ui32 handleIdx = 0;
+        ui64 offset = 0;
+        while (offset < 1_MB - requestSize) {
+            r = cache.RegisterDescribe(
+                nodeId,
+                handles[handleIdx],
+                TByteRange(offset, requestSize, blockSize));
+            UNIT_ASSERT_C(!r, r.GetRef().Describe());
+            offset += requestSize;
+            handleIdx = (handleIdx + 1) % handles.size();
+        }
+
+        while (offset < 10_MB) {
+            r = cache.RegisterDescribe(
+                nodeId,
+                handles[handleIdx],
+                TByteRange(offset, requestSize, blockSize));
+            UNIT_ASSERT(r);
+            UNIT_ASSERT_VALUES_EQUAL(
+                TByteRange(offset, 1_MB, blockSize).Describe(),
+                r->Describe());
+            offset += requestSize;
+            handleIdx = (handleIdx + 1) % handles.size();
+        }
+
+        r = cache.RegisterDescribe(
+            nodeId,
+            Handle1,
+            TByteRange(100_MB, requestSize, blockSize));
+        UNIT_ASSERT_C(!r, r.GetRef().Describe());
+    }
+
     Y_UNIT_TEST(ShouldCacheResults)
     {
         TDefaultCache cache;
@@ -220,6 +312,93 @@ Y_UNIT_TEST_SUITE(TReadAheadTest)
             FillResult(cache, 222, 106_MB - 128_KB, 128_KB));
     }
 
+    Y_UNIT_TEST(ShouldCacheResultsByHandle)
+    {
+        TReadAheadCache cache(TDefaultAllocator::Instance());
+        cache.Reset(32, 4, 1_MB, 20);
+
+        RegisterResult(cache, 111, Handle1, 0, 1_MB);
+        RegisterResult(cache, 111, Handle2, 1_MB, 1_MB);
+        RegisterResult(cache, 111, Handle1, 2_MB, 1_MB);
+        RegisterResult(cache, 111, Handle2, 3_MB, 1_MB);
+        RegisterResult(cache, 111, Handle1, 4_MB, 1_MB);
+        RegisterResult(cache, 111, Handle2, 5_MB, 1_MB);
+
+        // first 2 results already evicted from the per-node state
+        UNIT_ASSERT_VALUES_EQUAL(
+            "",
+            FillResult(cache, 111, Handle3, 0, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            "",
+            FillResult(cache, 111, Handle3, 1_MB, 1_MB));
+
+        // the next 4 results are present in the per-node state
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 2_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle3, 2_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 3_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle3, 3_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 4_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle3, 4_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 5_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle3, 5_MB, 1_MB));
+
+        // only the second result is unavailable for Handle1 lookups
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 0, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 0_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            "",
+            FillResult(cache, 111, Handle1, 1_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 2_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 2_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 3_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 3_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 4_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 4_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 5_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 5_MB, 1_MB));
+
+        // only the first result is unavailable for Handle2 lookups
+        UNIT_ASSERT_VALUES_EQUAL(
+            "",
+            FillResult(cache, 111, Handle2, 0_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 1_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle2, 1_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 2_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle2, 2_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 3_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle2, 3_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 4_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle2, 4_MB, 1_MB));
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 5_MB, 1_MB, 0),
+            FillResult(cache, 111, Handle2, 5_MB, 1_MB));
+
+        cache.OnDestroyHandle(111, Handle2);
+
+        // results still present for Handle1
+        UNIT_ASSERT_VALUES_EQUAL(
+            Expected(111, 0, 1_MB, 0),
+            FillResult(cache, 111, Handle1, 0_MB, 1_MB));
+
+        // but dropped for Handle2
+        UNIT_ASSERT_VALUES_EQUAL(
+            "",
+            FillResult(cache, 111, Handle2, 1_MB, 1_MB));
+    }
+
     Y_UNIT_TEST(ShouldEvictNodesAndResults)
     {
         TDefaultCache cache;
diff --git a/cloud/filestore/libs/storage/tablet/tablet_actor_readdata.cpp b/cloud/filestore/libs/storage/tablet/tablet_actor_readdata.cpp
index 64b50369e31..ac3f0d532ed 100644
--- a/cloud/filestore/libs/storage/tablet/tablet_actor_readdata.cpp
+++ b/cloud/filestore/libs/storage/tablet/tablet_actor_readdata.cpp
@@ -612,8 +612,14 @@ void TIndexTabletActor::HandleDescribeData(
     auto* handle = FindHandle(msg->Record.GetHandle());
     const ui64 nodeId = handle ? handle->GetNodeId() : InvalidNodeId;
     NProtoPrivate::TDescribeDataResponse result;
-    if (TryFillDescribeResult(nodeId, byteRange, &result)) {
-        RegisterDescribe(nodeId, byteRange);
+    const bool filled = TryFillDescribeResult(
+        nodeId,
+        msg->Record.GetHandle(),
+        byteRange,
+        &result);
+
+    if (filled) {
+        RegisterDescribe(nodeId, msg->Record.GetHandle(), byteRange);
 
         auto response = std::make_unique();
 
@@ -689,6 +695,7 @@ bool TIndexTabletActor::PrepareTx_ReadData(
     // support classes which don't have an assignment operator
     auto readAheadRange = RegisterDescribe(
         args.NodeId,
+        args.Handle,
         args.AlignedByteRange);
     if (readAheadRange.Defined()) {
         args.ReadAheadRange.ConstructInPlace(*readAheadRange);
@@ -804,7 +811,11 @@ void TIndexTabletActor::CompleteTx_ReadData(
             *args.ReadAheadRange,
             args,
             &record);
-        RegisterReadAheadResult(args.NodeId, *args.ReadAheadRange, record);
+        RegisterReadAheadResult(
+            args.NodeId,
+            args.Handle,
+            *args.ReadAheadRange,
+            record);
     }
 
     auto response =
diff --git a/cloud/filestore/libs/storage/tablet/tablet_state.h b/cloud/filestore/libs/storage/tablet/tablet_state.h
index 1840cbd313e..29930064996 100644
--- a/cloud/filestore/libs/storage/tablet/tablet_state.h
+++ b/cloud/filestore/libs/storage/tablet/tablet_state.h
@@ -1056,14 +1056,17 @@ FILESTORE_DUPCACHE_REQUESTS(FILESTORE_DECLARE_DUPCACHE)
 public:
     bool TryFillDescribeResult(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange& range,
         NProtoPrivate::TDescribeDataResponse* response);
     TMaybe<TByteRange> RegisterDescribe(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange inputRange);
     void InvalidateReadAheadCache(ui64 nodeId);
     void RegisterReadAheadResult(
         ui64 nodeId,
+        ui64 handle,
         const TByteRange& range,
         const NProtoPrivate::TDescribeDataResponse& result);
     TReadAheadCacheStats CalculateReadAheadCacheStats() const;
diff --git a/cloud/filestore/libs/storage/tablet/tablet_state_data.cpp b/cloud/filestore/libs/storage/tablet/tablet_state_data.cpp
index ed4d87e23c8..499983efdfa 100644
--- a/cloud/filestore/libs/storage/tablet/tablet_state_data.cpp
+++ b/cloud/filestore/libs/storage/tablet/tablet_state_data.cpp
@@ -1138,17 +1138,19 @@ void TIndexTabletState::CompleteForcedRangeOperation()
 
 bool TIndexTabletState::TryFillDescribeResult(
     ui64 nodeId,
+    ui64 handle,
     const TByteRange& range,
     NProtoPrivate::TDescribeDataResponse* response)
 {
-    return Impl->ReadAheadCache.TryFillResult(nodeId, range, response);
+    return Impl->ReadAheadCache.TryFillResult(nodeId, handle, range, response);
 }
 
 TMaybe<TByteRange> TIndexTabletState::RegisterDescribe(
     ui64 nodeId,
+    ui64 handle,
     const TByteRange inputRange)
 {
-    return Impl->ReadAheadCache.RegisterDescribe(nodeId, inputRange);
+    return Impl->ReadAheadCache.RegisterDescribe(nodeId, handle, inputRange);
 }
 
 void TIndexTabletState::InvalidateReadAheadCache(ui64 nodeId)
@@ -1158,10 +1160,11 @@ void TIndexTabletState::InvalidateReadAheadCache(ui64 nodeId)
 
 void TIndexTabletState::RegisterReadAheadResult(
     ui64 nodeId,
+    ui64 handle,
     const TByteRange& range,
     const NProtoPrivate::TDescribeDataResponse& result)
 {
-    Impl->ReadAheadCache.RegisterResult(nodeId, range, result);
+    Impl->ReadAheadCache.RegisterResult(nodeId, handle, range, result);
 }
 
 TReadAheadCacheStats TIndexTabletState::CalculateReadAheadCacheStats() const
diff --git a/cloud/filestore/libs/storage/tablet/tablet_state_sessions.cpp b/cloud/filestore/libs/storage/tablet/tablet_state_sessions.cpp
index 94dee193441..a0e591c7f8d 100644
--- a/cloud/filestore/libs/storage/tablet/tablet_state_sessions.cpp
+++ b/cloud/filestore/libs/storage/tablet/tablet_state_sessions.cpp
@@ -533,6 +533,10 @@ void TIndexTabletState::DestroyHandle(
 
     ReleaseLocks(db, handle->GetHandle());
 
+    Impl->ReadAheadCache.OnDestroyHandle(
+        handle->GetNodeId(),
+        handle->GetHandle());
+
     RemoveHandle(handle);
 }
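
Usage sketch (illustrative only, not part of the patch): the snippet below strings together the per-handle read-ahead API added above. MakeDescribeDataResponse() is a hypothetical helper, the node/handle ids are arbitrary, and the Reset() argument order is taken from the unit tests (maxNodes, maxResultsPerNode, rangeSize, maxGapPercentage).

// Illustrative sketch only -- not part of the patch. Assumes the headers above;
// MakeDescribeDataResponse() is a hypothetical stand-in for a real DescribeData
// result and the literal configuration values are arbitrary.
void ReadAheadPerHandleSketch()
{
    TReadAheadCache cache(TDefaultAllocator::Instance());
    cache.Reset(32, 4, 1_MB, 20);

    const ui64 nodeId = 111;
    const ui64 handle = 42;
    const TByteRange request(4_MB, 128_KB, 4_KB);

    // Every DescribeData request is registered per (node, handle); once the
    // recent requests on this handle look close to sequential, a wider (1 MB)
    // range to describe is suggested.
    TMaybe<TByteRange> suggested = cache.RegisterDescribe(nodeId, handle, request);

    if (suggested) {
        // Describe the wider range once and cache the answer; RegisterResult
        // stores it both for this handle and in the shared NoHandle state, so
        // other handles reading the same node can reuse it.
        NProtoPrivate::TDescribeDataResponse described = MakeDescribeDataResponse();
        cache.RegisterResult(nodeId, handle, *suggested, described);
    }

    // Later reads that fall inside a cached range are filled from the cache
    // instead of triggering another DescribeData.
    NProtoPrivate::TDescribeDataResponse filtered;
    const bool hit = cache.TryFillResult(nodeId, handle, request, &filtered);
    Y_UNUSED(hit);

    // Closing the handle drops only that handle's cached state; the per-node
    // entries survive until the node itself is evicted or invalidated.
    cache.OnDestroyHandle(nodeId, handle);
}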