From 55ee1c21441fb7cdc62a6516d3a629ab055e8507 Mon Sep 17 00:00:00 2001
From: wypb
Date: Thu, 27 Jun 2024 14:47:20 +0800
Subject: [PATCH] Fix HashStringAllocator::clear() and cumulativeBytes_

---
 velox/common/memory/HashStringAllocator.cpp | 40 ++++++++++-----------
 velox/common/memory/HashStringAllocator.h   |  2 ++
 2 files changed, 21 insertions(+), 21 deletions(-)

diff --git a/velox/common/memory/HashStringAllocator.cpp b/velox/common/memory/HashStringAllocator.cpp
index 813b90a1714ff..b62e111c78cf4 100644
--- a/velox/common/memory/HashStringAllocator.cpp
+++ b/velox/common/memory/HashStringAllocator.cpp
@@ -83,10 +83,6 @@ std::string HashStringAllocator::Header::toString() {
 
 HashStringAllocator::~HashStringAllocator() {
   clear();
-#ifdef NDEBUG
-  VELOX_CHECK_EQ(state_.cumulativeBytes(), 0);
-  VELOX_CHECK_EQ(state_.sizeFromPool(), 0);
-#endif
 }
 
 void HashStringAllocator::clear() {
@@ -108,39 +104,41 @@ void HashStringAllocator::clear() {
 #ifdef NDEBUG
   static const auto kHugePageSize = memory::AllocationTraits::kHugePageSize;
   for (auto i = 0; i < state_.pool().numRanges(); ++i) {
-    const auto topRange = state_.pool().rangeAt(i);
-    const auto topRangeSize = topRange.size();
-    if (topRangeSize >= kHugePageSize) {
-      VELOX_CHECK_EQ(0, topRangeSize % kHugePageSize);
+    const auto range = state_.pool().rangeAt(i);
+    const auto rangeSize = range.size();
+    if (rangeSize >= kHugePageSize) {
+      VELOX_CHECK_EQ(0, rangeSize % kHugePageSize);
     }
 
-    for (int64_t subRangeStart = 0; subRangeStart < topRangeSize;
-         subRangeStart += kHugePageSize) {
-      auto range = folly::Range<char*>(
-          topRange.data() + subRangeStart,
-          std::min<int64_t>(topRangeSize, kHugePageSize));
-      const auto size = range.size() - simd::kPadding;
-      auto* end = castToHeader(range.data() + size);
-      auto* header = castToHeader(range.data());
+    for (int64_t blockOffset = 0; blockOffset < rangeSize;
+         blockOffset += kHugePageSize) {
+      auto blockRange = folly::Range<char*>(
+          range.data() + blockOffset,
+          std::min<int64_t>(rangeSize, kHugePageSize));
+      const auto size = blockRange.size() - simd::kPadding;
+      auto* end = castToHeader(blockRange.data() + size);
+      auto* header = castToHeader(blockRange.data());
       while (header != end) {
-        VELOX_CHECK_GE(reinterpret_cast<char*>(header), range.data());
+        VELOX_CHECK_GE(reinterpret_cast<char*>(header), blockRange.data());
         VELOX_CHECK_LT(
             reinterpret_cast<char*>(header), reinterpret_cast<char*>(end));
         VELOX_CHECK_LE(
             reinterpret_cast<char*>(header->end()),
             reinterpret_cast<char*>(end));
 
-        if (header->isFree()) {
-          // No-op
-        } else {
-          // Continued block & Non-free block.
+        // Continued block & Non-free block.
+        if (!header->isFree()) {
           state_.cumulativeBytes() -= blockBytes(header);
         }
         header = castToHeader(header->end());
       }
     }
   }
+
+  VELOX_DCHECK_EQ(state_.cumulativeBytes(), 0);
+  VELOX_DCHECK_EQ(state_.sizeFromPool(), 0);
 #endif
+
   state_.pool().clear();
 }
 
diff --git a/velox/common/memory/HashStringAllocator.h b/velox/common/memory/HashStringAllocator.h
index ed6eaacad524d..cd3025ac33858 100644
--- a/velox/common/memory/HashStringAllocator.h
+++ b/velox/common/memory/HashStringAllocator.h
@@ -222,11 +222,13 @@ class HashStringAllocator : public StreamArena {
     return castToHeader(data) - 1;
   }
 
+  /// Returns the header below 'data'.
   static Header* castToHeader(const void* data) {
     return reinterpret_cast<Header*>(
         const_cast<char*>(reinterpret_cast<const char*>(data)));
   }
 
+  /// Returns the byte size of the block pointed to by 'header'.
   inline size_t blockBytes(const Header* header) const {
     return header->size() + kHeaderSize;
   }
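Note (not part of the patch): a minimal sketch of the lifecycle this change guards, assuming the usual Velox leaf-pool setup of this vintage; the MemoryManager initialization call and the public allocate()/free()/clear() signatures may differ in other Velox versions.

#include "velox/common/memory/HashStringAllocator.h"
#include "velox/common/memory/Memory.h"

using namespace facebook::velox;

int main() {
  // Assumed setup: a process-wide memory manager and one leaf pool.
  memory::MemoryManager::initialize({});
  auto pool = memory::memoryManager()->addLeafPool();
  HashStringAllocator allocator(pool.get());

  // allocate() bumps cumulativeBytes_ by blockBytes(header), i.e.
  // header->size() + kHeaderSize; free() subtracts the same amount.
  auto* header = allocator.allocate(100);
  allocator.free(header);

  // With this patch, clear() walks every huge-page-sized block of every
  // pool range (NDEBUG builds only), subtracts the bytes of blocks still
  // in use, and then VELOX_DCHECKs that cumulativeBytes_ and sizeFromPool_
  // end at zero, instead of checking them in the destructor after the
  // state has already been torn down.
  allocator.clear();
  return 0;
}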