From c64aca42bc37d265742dc9a3c33c24796967f02a Mon Sep 17 00:00:00 2001
From: Patrick Xia
Date: Fri, 24 Jan 2025 10:21:36 -0800
Subject: [PATCH] minor, refactor, whitespace-only, nfc: clang-format the repo

PiperOrigin-RevId: 719350703
Change-Id: I17e91e9a7982f7364e8c5f75b940d7166312ad8f
---
 tcmalloc/background.cc                        |   1 -
 tcmalloc/central_freelist.h                   |   3 +-
 tcmalloc/deallocation_profiler.cc             |   2 +-
 tcmalloc/global_stats.cc                      |   1 -
 tcmalloc/guarded_page_allocator.h             |  10 +-
 .../guarded_page_allocator_profile_test.cc    |  30 ++-
 tcmalloc/huge_allocator_test.cc               |   4 +-
 tcmalloc/huge_page_filler_test.cc             | 175 +++++++++---------
 tcmalloc/huge_page_subrelease.h               |   8 +-
 tcmalloc/internal/allocation_guard_test.cc    |   4 +-
 tcmalloc/internal/config.h                    |   2 +-
 tcmalloc/internal/declarations.h              |   1 -
 tcmalloc/internal/logging.h                   |   4 +-
 tcmalloc/internal/memory_stats_test.cc        |   1 -
 tcmalloc/internal/percpu_tcmalloc.h           |   9 +-
 tcmalloc/internal/util.h                      |   8 +-
 tcmalloc/mock_huge_page_static_forwarder.cc   |   1 -
 tcmalloc/mock_transfer_cache.h                |   2 +-
 tcmalloc/new_extension_test.cc                |  12 +-
 tcmalloc/profile_test.cc                      |   4 +-
 tcmalloc/sizemap_test.cc                      |  12 +-
 tcmalloc/stack_trace_table.h                  |   2 -
 .../malloc_extension_system_malloc_test.cc    |   3 +-
 .../testing/malloc_tracing_extension_test.cc  |   1 -
 ...able_huge_region_more_often_test_helper.cc |   1 -
 ...t_disable_tcmalloc_big_span_test_helper.cc |   1 -
 tcmalloc/testing/want_hpaa_test_helper.cc     |   1 -
 tcmalloc/transfer_cache.h                     |   5 +-
 28 files changed, 143 insertions(+), 165 deletions(-)

diff --git a/tcmalloc/background.cc b/tcmalloc/background.cc
index 940ebda01..762214ced 100644
--- a/tcmalloc/background.cc
+++ b/tcmalloc/background.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-
 #include
 
 #include
diff --git a/tcmalloc/central_freelist.h b/tcmalloc/central_freelist.h
index b815e7744..980554f04 100644
--- a/tcmalloc/central_freelist.h
+++ b/tcmalloc/central_freelist.h
@@ -309,8 +309,7 @@ inline void CentralFreeList::Init(size_t size_class)
   objects_per_span_ =
       pages_per_span_.in_bytes() / (object_size_ ? object_size_ : 1);
   size_reciprocal_ = Span::CalcReciprocal(object_size_);
-  use_all_buckets_for_few_object_spans_ =
-      objects_per_span_ <= 2 * kNumLists;
+  use_all_buckets_for_few_object_spans_ = objects_per_span_ <= 2 * kNumLists;
 
   // Records nonempty_ list index associated with the span with
   // objects_per_span_ number of allocated objects. Refer to the comment in
diff --git a/tcmalloc/deallocation_profiler.cc b/tcmalloc/deallocation_profiler.cc
index 7964c9eaa..60539c471 100644
--- a/tcmalloc/deallocation_profiler.cc
+++ b/tcmalloc/deallocation_profiler.cc
@@ -15,7 +15,7 @@
 #include "tcmalloc/deallocation_profiler.h"
 
 #include
-#include <cmath> // for std::lround
+#include <cmath>  // for std::lround
 #include
 #include <cstdint>  // for uintptr_t
 #include
diff --git a/tcmalloc/global_stats.cc b/tcmalloc/global_stats.cc
index 20d34426b..243f614cc 100644
--- a/tcmalloc/global_stats.cc
+++ b/tcmalloc/global_stats.cc
@@ -182,7 +182,6 @@ void ExtractStats(TCMallocStats* r, uint64_t* class_count,
   } else {
     r->pagemap_root_bytes_res = 0;
   }
-
 }
 
 void ExtractTCMallocStats(TCMallocStats* r, bool report_residence) {
diff --git a/tcmalloc/guarded_page_allocator.h b/tcmalloc/guarded_page_allocator.h
index 72d658cad..cb8e03146 100644
--- a/tcmalloc/guarded_page_allocator.h
+++ b/tcmalloc/guarded_page_allocator.h
@@ -302,12 +302,12 @@ class GuardedPageAllocator {
   // is detected.
   SlotMetadata* data_;
 
-  uintptr_t pages_base_addr_;  // Points to start of mapped region.
-  uintptr_t pages_end_addr_;  // Points to the end of mapped region.
-  uintptr_t first_page_addr_;  // Points to first page returnable by Allocate.
+  uintptr_t pages_base_addr_;   // Points to start of mapped region.
+  uintptr_t pages_end_addr_;    // Points to the end of mapped region.
+  uintptr_t first_page_addr_;   // Points to first page returnable by Allocate.
   size_t max_allocated_pages_;  // Max number of pages to allocate at once.
-  size_t total_pages_;  // Size of the page pool to allocate from.
-  size_t page_size_;  // Size of pages we allocate.
+  size_t total_pages_;          // Size of the page pool to allocate from.
+  size_t page_size_;            // Size of pages we allocate.
   Random rand_;
 
   // True if this object has been fully initialized.
diff --git a/tcmalloc/guarded_page_allocator_profile_test.cc b/tcmalloc/guarded_page_allocator_profile_test.cc
index fc3303212..961bf77b6 100644
--- a/tcmalloc/guarded_page_allocator_profile_test.cc
+++ b/tcmalloc/guarded_page_allocator_profile_test.cc
@@ -105,9 +105,8 @@ TEST_F(GuardedPageAllocatorProfileTest, Guarded) {
   AllocateUntilGuarded();
   auto token = MallocExtension::StartAllocationProfiling();
 
-  AllocateGuardableUntil(1051, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  AllocateGuardableUntil(
+      1051, [&](void* alloc) -> NextSteps { return {true, true}; });
 
   auto profile = std::move(token).Stop();
   ExamineSamples(profile, Profile::Sample::GuardedStatus::Guarded);
@@ -118,9 +117,8 @@ TEST_F(GuardedPageAllocatorProfileTest, NotAttempted) {
   auto token = MallocExtension::StartAllocationProfiling();
 
   constexpr size_t alloc_size = 2 * 1024 * 1024;
-  AllocateUntil(alloc_size, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  AllocateUntil(alloc_size,
+                [&](void* alloc) -> NextSteps { return {true, true}; });
 
   auto profile = std::move(token).Stop();
   ExamineSamples(profile, Profile::Sample::GuardedStatus::NotAttempted,
@@ -141,9 +139,8 @@ TEST_F(GuardedPageAllocatorProfileTest, LargerThanOnePage) {
   auto token = MallocExtension::StartAllocationProfiling();
 
   constexpr size_t alloc_size = kPageSize + 1;
-  AllocateUntil(alloc_size, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  AllocateUntil(alloc_size,
+                [&](void* alloc) -> NextSteps { return {true, true}; });
 
   auto profile = std::move(token).Stop();
   ExamineSamples(profile, Profile::Sample::GuardedStatus::LargerThanOnePage,
@@ -163,9 +160,8 @@ TEST_F(GuardedPageAllocatorProfileTest, Disabled) {
   ScopedProfileSamplingInterval profile_sampling_interval(1);
   auto token = MallocExtension::StartAllocationProfiling();
 
-  AllocateGuardableUntil(1024, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  AllocateGuardableUntil(
+      1024, [&](void* alloc) -> NextSteps { return {true, true}; });
 
   auto profile = std::move(token).Stop();
   ExamineSamples(profile, Profile::Sample::GuardedStatus::Disabled);
@@ -250,9 +246,8 @@ TEST_F(GuardedPageAllocatorProfileTest, TooSmall) {
 
   // Next sampled allocation should be too small
   constexpr size_t alloc_size = 0;
-  AllocateGuardableUntil(alloc_size, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  AllocateGuardableUntil(
+      alloc_size, [&](void* alloc) -> NextSteps { return {true, true}; });
 
   auto profile = std::move(token).Stop();
   ExamineSamples(profile, Profile::Sample::GuardedStatus::TooSmall,
@@ -301,9 +296,8 @@ TEST_F(GuardedPageAllocatorProfileTest, NeverSample) {
   auto token = MallocExtension::StartAllocationProfiling();
 
   // This will not succeed in guarding anything.
-  int alloc_count = AllocateGuardableUntil(1025, [&](void* alloc) -> NextSteps {
-    return {true, true};
-  });
+  int alloc_count = AllocateGuardableUntil(
+      1025, [&](void* alloc) -> NextSteps { return {true, true}; });
   ASSERT_EQ(alloc_count, 1);
 
   auto profile = std::move(token).Stop();
diff --git a/tcmalloc/huge_allocator_test.cc b/tcmalloc/huge_allocator_test.cc
index 9d74938c4..a26c4b50e 100644
--- a/tcmalloc/huge_allocator_test.cc
+++ b/tcmalloc/huge_allocator_test.cc
@@ -54,9 +54,7 @@ class HugeAllocatorTest : public testing::TestWithParam {
     vm_allocator_.backing_.resize(1024);
   }
 
-  ~HugeAllocatorTest() override {
-    vm_allocator_.backing_.clear();
-  }
+  ~HugeAllocatorTest() override { vm_allocator_.backing_.clear(); }
 
   size_t* GetActual(HugePage p) { return &vm_allocator_.backing_[p.index()]; }
 
diff --git a/tcmalloc/huge_page_filler_test.cc b/tcmalloc/huge_page_filler_test.cc
index 75b7d5681..5b5b0c1be 100644
--- a/tcmalloc/huge_page_filler_test.cc
+++ b/tcmalloc/huge_page_filler_test.cc
@@ -1688,24 +1688,23 @@ TEST_P(FillerTest, CheckPreviouslyReleasedStats) {
     Printer printer(&*buffer.begin(), buffer.size());
     filler_.Print(&printer, true);
   }
-    buffer.resize(strlen(buffer.c_str()));
-    EXPECT_THAT(buffer, testing::HasSubstr(
-                            "HugePageFiller: 0 hugepages became full after "
-                            "being previously released, "
-                            "out of which 0 pages are hugepage backed."));
-
-    // Repopulate.
-    ASSERT_TRUE(!tiny1.empty());
-    half =
-        AllocateVectorWithSpanAllocInfo(N / 2, tiny1.front().span_alloc_info);
-    EXPECT_EQ(ReleasePages(kMaxValidPages), Length(0));
-    EXPECT_EQ(filler_.previously_released_huge_pages(), NHugePages(1));
-    buffer.resize(1024 * 1024);
-    {
-      PageHeapSpinLockHolder l;
-      Printer printer(&*buffer.begin(), buffer.size());
-      filler_.Print(&printer, true);
-    }
+  buffer.resize(strlen(buffer.c_str()));
+  EXPECT_THAT(buffer, testing::HasSubstr(
+                          "HugePageFiller: 0 hugepages became full after "
+                          "being previously released, "
+                          "out of which 0 pages are hugepage backed."));
+
+  // Repopulate.
+  ASSERT_TRUE(!tiny1.empty());
+  half = AllocateVectorWithSpanAllocInfo(N / 2, tiny1.front().span_alloc_info);
+  EXPECT_EQ(ReleasePages(kMaxValidPages), Length(0));
+  EXPECT_EQ(filler_.previously_released_huge_pages(), NHugePages(1));
+  buffer.resize(1024 * 1024);
+  {
+    PageHeapSpinLockHolder l;
+    Printer printer(&*buffer.begin(), buffer.size());
+    filler_.Print(&printer, true);
+  }
   buffer.resize(strlen(buffer.c_str()));
   EXPECT_THAT(buffer,
@@ -2084,76 +2083,76 @@ TEST_P(FillerTest, SkipPartialAllocSubrelease_SpansAllocated) {
   // in demand and tries to subrelease. Finally, it waits for time interval c to
   // generate the highest peak for evaluating subrelease correctness. Skip
   // subrelease selects those demand points using provided time intervals.
-  const auto demand_pattern =
-      [&](absl::Duration a, absl::Duration b, absl::Duration c,
-          SkipSubreleaseIntervals intervals, bool expected_subrelease) {
-        const Length N = kPagesPerHugePage;
-        // First peak: min_demand 3/4N, max_demand 1N.
-        std::vector<PAlloc> peak1a =
-            AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
-        ASSERT_TRUE(!peak1a.empty());
-        std::vector<PAlloc> peak1b = AllocateVectorWithSpanAllocInfo(
-            N / 4, peak1a.front().span_alloc_info);
-        Advance(a);
-        // Second peak: min_demand 0, max_demand 2N.
-        DeleteVector(peak1a);
-        DeleteVector(peak1b);
-
-        std::vector<PAlloc> half = AllocateVectorWithSpanAllocInfo(N / 2, info);
-        ASSERT_TRUE(!half.empty());
-        std::vector<PAlloc> tiny1 = AllocateVectorWithSpanAllocInfo(
-            N / 4, half.front().span_alloc_info);
-        std::vector<PAlloc> tiny2 = AllocateVectorWithSpanAllocInfo(
-            N / 4, half.front().span_alloc_info);
-
-        // To force a peak, we allocate 3/4 and 1/4 of a huge page. This is
-        // necessary after we delete `half` below, as a half huge page for the
-        // peak would fill into the gap previously occupied by it.
-        std::vector<PAlloc> peak2a =
-            AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
-        ASSERT_TRUE(!peak2a.empty());
-        std::vector<PAlloc> peak2b = AllocateVectorWithSpanAllocInfo(
-            N / 4, peak2a.front().span_alloc_info);
-        EXPECT_EQ(filler_.used_pages(), 2 * N);
-        DeleteVector(peak2a);
-        DeleteVector(peak2b);
-        Advance(b);
-        DeleteVector(half);
-        EXPECT_EQ(filler_.free_pages(), Length(N / 2));
-        // The number of released pages is limited to the number of free pages.
-        EXPECT_EQ(expected_subrelease ? N / 2 : Length(0),
-                  ReleasePartialPages(10 * N, intervals));
-
-        Advance(c);
-        half = AllocateVectorWithSpanAllocInfo(N / 2,
-                                               half.front().span_alloc_info);
-        // Third peak: min_demand 1/2N, max_demand (2+1/2)N.
-        std::vector<PAlloc> peak3a =
-            AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
-        ASSERT_TRUE(!peak3a.empty());
-        std::vector<PAlloc> peak3b = AllocateVectorWithSpanAllocInfo(
-            N / 4, peak3a.front().span_alloc_info);
-
-        std::vector<PAlloc> peak4a =
-            AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
-        ASSERT_TRUE(!peak4a.empty());
-        std::vector<PAlloc> peak4b = AllocateVectorWithSpanAllocInfo(
-            N / 4, peak4a.front().span_alloc_info);
-
-        DeleteVector(half);
-        DeleteVector(tiny1);
-        DeleteVector(tiny2);
-        DeleteVector(peak3a);
-        DeleteVector(peak3b);
-        DeleteVector(peak4a);
-        DeleteVector(peak4b);
-
-        EXPECT_EQ(filler_.used_pages(), Length(0));
-        EXPECT_EQ(filler_.unmapped_pages(), Length(0));
-        EXPECT_EQ(filler_.free_pages(), Length(0));
-
-        EXPECT_EQ(Length(0), ReleasePartialPages(10 * N));
-      };
+  const auto demand_pattern = [&](absl::Duration a, absl::Duration b,
+                                  absl::Duration c,
+                                  SkipSubreleaseIntervals intervals,
+                                  bool expected_subrelease) {
+    const Length N = kPagesPerHugePage;
+    // First peak: min_demand 3/4N, max_demand 1N.
+    std::vector<PAlloc> peak1a =
+        AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
+    ASSERT_TRUE(!peak1a.empty());
+    std::vector<PAlloc> peak1b =
+        AllocateVectorWithSpanAllocInfo(N / 4, peak1a.front().span_alloc_info);
+    Advance(a);
+    // Second peak: min_demand 0, max_demand 2N.
+    DeleteVector(peak1a);
+    DeleteVector(peak1b);
+
+    std::vector<PAlloc> half = AllocateVectorWithSpanAllocInfo(N / 2, info);
+    ASSERT_TRUE(!half.empty());
+    std::vector<PAlloc> tiny1 =
+        AllocateVectorWithSpanAllocInfo(N / 4, half.front().span_alloc_info);
+    std::vector<PAlloc> tiny2 =
+        AllocateVectorWithSpanAllocInfo(N / 4, half.front().span_alloc_info);
+
+    // To force a peak, we allocate 3/4 and 1/4 of a huge page. This is
+    // necessary after we delete `half` below, as a half huge page for the
+    // peak would fill into the gap previously occupied by it.
+    std::vector<PAlloc> peak2a =
+        AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
+    ASSERT_TRUE(!peak2a.empty());
+    std::vector<PAlloc> peak2b =
+        AllocateVectorWithSpanAllocInfo(N / 4, peak2a.front().span_alloc_info);
+    EXPECT_EQ(filler_.used_pages(), 2 * N);
+    DeleteVector(peak2a);
+    DeleteVector(peak2b);
+    Advance(b);
+    DeleteVector(half);
+    EXPECT_EQ(filler_.free_pages(), Length(N / 2));
+    // The number of released pages is limited to the number of free pages.
+    EXPECT_EQ(expected_subrelease ? N / 2 : Length(0),
+              ReleasePartialPages(10 * N, intervals));
+
+    Advance(c);
+    half = AllocateVectorWithSpanAllocInfo(N / 2, half.front().span_alloc_info);
+    // Third peak: min_demand 1/2N, max_demand (2+1/2)N.
+    std::vector<PAlloc> peak3a =
+        AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
+    ASSERT_TRUE(!peak3a.empty());
+    std::vector<PAlloc> peak3b =
+        AllocateVectorWithSpanAllocInfo(N / 4, peak3a.front().span_alloc_info);
+
+    std::vector<PAlloc> peak4a =
+        AllocateVectorWithSpanAllocInfo(3 * N / 4, info);
+    ASSERT_TRUE(!peak4a.empty());
+    std::vector<PAlloc> peak4b =
+        AllocateVectorWithSpanAllocInfo(N / 4, peak4a.front().span_alloc_info);
+
+    DeleteVector(half);
+    DeleteVector(tiny1);
+    DeleteVector(tiny2);
+    DeleteVector(peak3a);
+    DeleteVector(peak3b);
+    DeleteVector(peak4a);
+    DeleteVector(peak4b);
+
+    EXPECT_EQ(filler_.used_pages(), Length(0));
+    EXPECT_EQ(filler_.unmapped_pages(), Length(0));
+    EXPECT_EQ(filler_.free_pages(), Length(0));
+
+    EXPECT_EQ(Length(0), ReleasePartialPages(10 * N));
+  };
 
   {
     // Uses peak interval for skipping subrelease. We should correctly skip
diff --git a/tcmalloc/huge_page_subrelease.h b/tcmalloc/huge_page_subrelease.h
index d2d6b77cd..7fb861ad5 100644
--- a/tcmalloc/huge_page_subrelease.h
+++ b/tcmalloc/huge_page_subrelease.h
@@ -224,7 +224,7 @@ struct SkipSubreleaseIntervals {
 };
 
 struct SubreleaseStats {
-  Length total_pages_subreleased;  // cumulative since startup
+  Length total_pages_subreleased;                // cumulative since startup
   Length total_partial_alloc_pages_subreleased;  // cumulative since startup
   Length num_pages_subreleased;
   Length num_partial_alloc_pages_subreleased;
@@ -458,11 +458,7 @@ class SubreleaseStatsTracker {
   // We collect subrelease statistics at four "interesting points" within each
   // time step: at min/max demand of pages and at min/max use of hugepages. This
   // allows us to approximate the envelope of the different metrics.
-  enum StatsType {
-    kStatsAtMinDemand,
-    kStatsAtMaxDemand,
-    kNumStatsTypes
-  };
+  enum StatsType { kStatsAtMinDemand, kStatsAtMaxDemand, kNumStatsTypes };
 
   struct SubreleaseStatsEntry {
     // Collect stats at "interesting points" (minimum/maximum page demand
diff --git a/tcmalloc/internal/allocation_guard_test.cc b/tcmalloc/internal/allocation_guard_test.cc
index cdb8509cc..c1faa12e4 100644
--- a/tcmalloc/internal/allocation_guard_test.cc
+++ b/tcmalloc/internal/allocation_guard_test.cc
@@ -32,8 +32,8 @@ TEST(AllocationGuard, Noncooperative) {
 
 TEST(AllocationGuard, CooperativeDeathTest) {
   absl::base_internal::SpinLock lock;
-  EXPECT_DEBUG_DEATH({ AllocationGuardSpinLockHolder h(&lock); },
-                     "SIGABRT received");
+  EXPECT_DEBUG_DEATH(
+      { AllocationGuardSpinLockHolder h(&lock); }, "SIGABRT received");
 }
 
 }  // namespace
diff --git a/tcmalloc/internal/config.h b/tcmalloc/internal/config.h
index 334cef117..442cdce87 100644
--- a/tcmalloc/internal/config.h
+++ b/tcmalloc/internal/config.h
@@ -30,7 +30,7 @@
 
 #if defined(__GLIBC__) && defined(__GLIBC_MINOR__)
 #define TCMALLOC_GLIBC_PREREQ(major, minor) \
-  ((__GLIBC__ * 100 + __GLIBC_MINOR__) >= ((major)*100 + (minor)))
+  ((__GLIBC__ * 100 + __GLIBC_MINOR__) >= ((major) * 100 + (minor)))
 #else
 #define TCMALLOC_GLIBC_PREREQ(major, minor) 0
 #endif
diff --git a/tcmalloc/internal/declarations.h b/tcmalloc/internal/declarations.h
index 0e050667b..7ef46ba06 100644
--- a/tcmalloc/internal/declarations.h
+++ b/tcmalloc/internal/declarations.h
@@ -18,7 +18,6 @@
 #ifndef TCMALLOC_INTERNAL_DECLARATIONS_H_
 #define TCMALLOC_INTERNAL_DECLARATIONS_H_
 
-
 #if !defined(__cpp_sized_deallocation)
 void operator delete(void*, std::size_t) noexcept;
diff --git a/tcmalloc/internal/logging.h b/tcmalloc/internal/logging.h
index e751cedf7..0e4f63ac3 100644
--- a/tcmalloc/internal/logging.h
+++ b/tcmalloc/internal/logging.h
@@ -250,8 +250,8 @@ const T& FormatConvert(const T& v) {
 // Print into buffer
 class Printer {
  private:
-  char* buf_;     // Where should we write next
-  size_t left_;   // Space left in buffer (including space for \0)
+  char* buf_;        // Where should we write next
+  size_t left_;      // Space left in buffer (including space for \0)
   size_t required_;  // Space we needed to complete all printf calls up to this
                      // point
diff --git a/tcmalloc/internal/memory_stats_test.cc b/tcmalloc/internal/memory_stats_test.cc
index f18806461..51929879a 100644
--- a/tcmalloc/internal/memory_stats_test.cc
+++ b/tcmalloc/internal/memory_stats_test.cc
@@ -17,7 +17,6 @@
 #include
 #include
 
-
 #include "gtest/gtest.h"
 
diff --git a/tcmalloc/internal/percpu_tcmalloc.h b/tcmalloc/internal/percpu_tcmalloc.h
index d18c30ca0..8112343dd 100644
--- a/tcmalloc/internal/percpu_tcmalloc.h
+++ b/tcmalloc/internal/percpu_tcmalloc.h
@@ -1017,8 +1017,9 @@ inline void* TcmallocSlab<NumClasses>::CpuMemoryStart(void* slabs, Shift shift,
 }
 
 template <size_t NumClasses>
-inline auto TcmallocSlab<NumClasses>::GetHeader(
-    void* slabs, Shift shift, int cpu, size_t size_class) -> AtomicHeader* {
+inline auto TcmallocSlab<NumClasses>::GetHeader(void* slabs, Shift shift,
+                                                int cpu, size_t size_class)
+    -> AtomicHeader* {
   TC_ASSERT_NE(size_class, 0);
   return &static_cast<AtomicHeader*>(
       CpuMemoryStart(slabs, shift, cpu))[size_class];
@@ -1285,8 +1286,8 @@ template <size_t NumClasses>
 auto TcmallocSlab<NumClasses>::ResizeSlabs(
     Shift new_shift, void* new_slabs,
     absl::FunctionRef<size_t(size_t)> capacity,
-    absl::FunctionRef<bool(size_t)> populated,
-    DrainHandler drain_handler) -> ResizeSlabsInfo {
+    absl::FunctionRef<bool(size_t)> populated, DrainHandler drain_handler)
+    -> ResizeSlabsInfo {
   // Phase 1: Collect begins, stop all CPUs and initialize any CPUs in the new
   // slab that have already been populated in the old slab.
   const auto [old_slabs, old_shift] =
diff --git a/tcmalloc/internal/util.h b/tcmalloc/internal/util.h
index 314a9a1af..d7e3b77e0 100644
--- a/tcmalloc/internal/util.h
+++ b/tcmalloc/internal/util.h
@@ -98,14 +98,14 @@ class ScopedSigmask {
   ScopedSigmask() noexcept;
 
   // No copy, move or assign
-  ScopedSigmask(const ScopedSigmask &) = delete;
-  ScopedSigmask &operator=(const ScopedSigmask &) = delete;
+  ScopedSigmask(const ScopedSigmask&) = delete;
+  ScopedSigmask& operator=(const ScopedSigmask&) = delete;
 
   // Restores the masked signal handlers to its former state.
   ~ScopedSigmask() noexcept;
 
  private:
-  void Setmask(int how, sigset_t *set, sigset_t *old);
+  void Setmask(int how, sigset_t* set, sigset_t* old);
 
   sigset_t old_set_;
 };
@@ -120,7 +120,7 @@ inline ScopedSigmask::~ScopedSigmask() noexcept {
   Setmask(SIG_SETMASK, &old_set_, nullptr);
 }
 
-inline void ScopedSigmask::Setmask(int how, sigset_t *set, sigset_t *old) {
+inline void ScopedSigmask::Setmask(int how, sigset_t* set, sigset_t* old) {
   const int result = pthread_sigmask(how, set, old);
   TC_CHECK_EQ(result, 0);
 }
diff --git a/tcmalloc/mock_huge_page_static_forwarder.cc b/tcmalloc/mock_huge_page_static_forwarder.cc
index f9dffa254..3625d6235 100644
--- a/tcmalloc/mock_huge_page_static_forwarder.cc
+++ b/tcmalloc/mock_huge_page_static_forwarder.cc
@@ -11,4 +11,3 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
-
diff --git a/tcmalloc/mock_transfer_cache.h b/tcmalloc/mock_transfer_cache.h
index 958b007e0..65cde5ffa 100644
--- a/tcmalloc/mock_transfer_cache.h
+++ b/tcmalloc/mock_transfer_cache.h
@@ -209,7 +209,7 @@ class FakeTransferCacheEnvironment {
   FreeList& central_freelist() { return cache_.freelist(); }
 
  private:
-  void Init(){};
+  void Init() {};
 
   Manager manager_;
   TransferCache cache_;
diff --git a/tcmalloc/new_extension_test.cc b/tcmalloc/new_extension_test.cc
index 71a4fcb7c..b531e96f6 100644
--- a/tcmalloc/new_extension_test.cc
+++ b/tcmalloc/new_extension_test.cc
@@ -39,9 +39,9 @@ TEST(HotColdNew, InvalidSizeFails) {
   GTEST_SKIP() << "skipping large allocation tests on sanitizers";
 #endif
   constexpr size_t kBadSize = std::numeric_limits<size_t>::max();
-  EXPECT_DEATH(::operator new (kBadSize, hot_cold_t{0}), ".*");
-  EXPECT_DEATH(::operator new (kBadSize, hot_cold_t{128}), ".*");
-  EXPECT_DEATH(::operator new (kBadSize, hot_cold_t{255}), ".*");
+  EXPECT_DEATH(::operator new(kBadSize, hot_cold_t{0}), ".*");
+  EXPECT_DEATH(::operator new(kBadSize, hot_cold_t{128}), ".*");
+  EXPECT_DEATH(::operator new(kBadSize, hot_cold_t{255}), ".*");
   EXPECT_DEATH(::operator new[](kBadSize, hot_cold_t{0}), ".*");
   EXPECT_DEATH(::operator new[](kBadSize, hot_cold_t{128}), ".*");
   EXPECT_DEATH(::operator new[](kBadSize, hot_cold_t{255}), ".*");
@@ -49,9 +49,9 @@ TEST(HotColdNew, InvalidSizeNothrow) {
   constexpr size_t kBadSize = std::numeric_limits<size_t>::max();
-  EXPECT_EQ(::operator new (kBadSize, std::nothrow, hot_cold_t{0}), nullptr);
-  EXPECT_EQ(::operator new (kBadSize, std::nothrow, hot_cold_t{128}), nullptr);
-  EXPECT_EQ(::operator new (kBadSize, std::nothrow, hot_cold_t{255}), nullptr);
+  EXPECT_EQ(::operator new(kBadSize, std::nothrow, hot_cold_t{0}), nullptr);
+  EXPECT_EQ(::operator new(kBadSize, std::nothrow, hot_cold_t{128}), nullptr);
+  EXPECT_EQ(::operator new(kBadSize, std::nothrow, hot_cold_t{255}), nullptr);
   EXPECT_EQ(::operator new[](kBadSize, std::nothrow, hot_cold_t{0}), nullptr);
   EXPECT_EQ(::operator new[](kBadSize, std::nothrow, hot_cold_t{128}), nullptr);
   EXPECT_EQ(::operator new[](kBadSize, std::nothrow, hot_cold_t{255}), nullptr);
diff --git a/tcmalloc/profile_test.cc b/tcmalloc/profile_test.cc
index 5509d731b..c98a26af8 100644
--- a/tcmalloc/profile_test.cc
+++ b/tcmalloc/profile_test.cc
@@ -63,7 +63,9 @@ TEST(AllocationSampleTest, TokenAbuse) {
   EXPECT_EQ(count2, 0);
 
   // Delete (on the scope ending) without Claim should also be OK.
-  { MallocExtension::StartAllocationProfiling(); }
+  {
+    MallocExtension::StartAllocationProfiling();
+  }
 }
 
 // Verify that profiling sessions concurrent with allocations do not crash due
diff --git a/tcmalloc/sizemap_test.cc b/tcmalloc/sizemap_test.cc
index 00557fdfa..794391d29 100644
--- a/tcmalloc/sizemap_test.cc
+++ b/tcmalloc/sizemap_test.cc
@@ -109,12 +109,12 @@ TEST(ColdSizeClassTest, VerifyAllocationFullRange) {
   size_t max_size = classes[classes.size() - 1].size;
   for (int request_size = size_before_min_alloc_for_cold + 1;
        request_size <= max_size; ++request_size) {
-  EXPECT_EQ(size_map.SizeClass(CppPolicy().AccessAsCold(), request_size),
-            size_map.SizeClass(CppPolicy().AccessAsHot(), request_size) +
-                (tc_globals.numa_topology().GetCurrentPartition() == 0
-                     ? kExpandedClassesStart
-                     : kNumBaseClasses))
-      << request_size;
+    EXPECT_EQ(size_map.SizeClass(CppPolicy().AccessAsCold(), request_size),
+              size_map.SizeClass(CppPolicy().AccessAsHot(), request_size) +
+                  (tc_globals.numa_topology().GetCurrentPartition() == 0
+                       ? kExpandedClassesStart
+                       : kNumBaseClasses))
+        << request_size;
   }
 }
diff --git a/tcmalloc/stack_trace_table.h b/tcmalloc/stack_trace_table.h
index c74f9b7a1..b8ace94a4 100644
--- a/tcmalloc/stack_trace_table.h
+++ b/tcmalloc/stack_trace_table.h
@@ -17,8 +17,6 @@
 #ifndef TCMALLOC_STACK_TRACE_TABLE_H_
 #define TCMALLOC_STACK_TRACE_TABLE_H_
 
-
-
 #include "absl/base/thread_annotations.h"
 #include "absl/time/time.h"
 #include "tcmalloc/common.h"
diff --git a/tcmalloc/testing/malloc_extension_system_malloc_test.cc b/tcmalloc/testing/malloc_extension_system_malloc_test.cc
index ff13f36cc..b58599069 100644
--- a/tcmalloc/testing/malloc_extension_system_malloc_test.cc
+++ b/tcmalloc/testing/malloc_extension_system_malloc_test.cc
@@ -40,7 +40,8 @@ TEST(MallocExtension, SnapshotCurrentIsEmpty) {
   // All of the profiles should be empty.
   ProfileType types[] = {
       ProfileType::kHeap,
-      ProfileType::kFragmentation, ProfileType::kPeakHeap,
+      ProfileType::kFragmentation,
+      ProfileType::kPeakHeap,
       ProfileType::kAllocations,
   };
diff --git a/tcmalloc/testing/malloc_tracing_extension_test.cc b/tcmalloc/testing/malloc_tracing_extension_test.cc
index 1cbac3b78..f57f18b29 100644
--- a/tcmalloc/testing/malloc_tracing_extension_test.cc
+++ b/tcmalloc/testing/malloc_tracing_extension_test.cc
@@ -21,7 +21,6 @@
 
 #include
 
-
 #include "gtest/gtest.h"
 #include "absl/status/status.h"
 #include "absl/status/statusor.h"
diff --git a/tcmalloc/testing/want_disable_huge_region_more_often_test_helper.cc b/tcmalloc/testing/want_disable_huge_region_more_often_test_helper.cc
index 39407fcb5..409ea137b 100644
--- a/tcmalloc/testing/want_disable_huge_region_more_often_test_helper.cc
+++ b/tcmalloc/testing/want_disable_huge_region_more_often_test_helper.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-
 #include
 
 #include
diff --git a/tcmalloc/testing/want_disable_tcmalloc_big_span_test_helper.cc b/tcmalloc/testing/want_disable_tcmalloc_big_span_test_helper.cc
index 159970f7c..288137525 100644
--- a/tcmalloc/testing/want_disable_tcmalloc_big_span_test_helper.cc
+++ b/tcmalloc/testing/want_disable_tcmalloc_big_span_test_helper.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-
 #include
 
 #include
diff --git a/tcmalloc/testing/want_hpaa_test_helper.cc b/tcmalloc/testing/want_hpaa_test_helper.cc
index c9de891ce..245e4469c 100644
--- a/tcmalloc/testing/want_hpaa_test_helper.cc
+++ b/tcmalloc/testing/want_hpaa_test_helper.cc
@@ -12,7 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-
 #include
 
 #include
diff --git a/tcmalloc/transfer_cache.h b/tcmalloc/transfer_cache.h
index 3eb9ca1e0..5171ab3b2 100644
--- a/tcmalloc/transfer_cache.h
+++ b/tcmalloc/transfer_cache.h
@@ -77,9 +77,8 @@ class ShardedStaticForwarder : public StaticForwarder {
     // To make sure that we do not change the behavior of the traditional
     // sharded cache configuration, we use generic version of the cache only
     // when the traditional version is not enabled.
-    use_generic_cache_ =
-        !IsExperimentActive(
-            Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE);
+    use_generic_cache_ = !IsExperimentActive(
+        Experiment::TEST_ONLY_TCMALLOC_SHARDED_TRANSFER_CACHE);
     // Traditionally, we enable sharded transfer cache for large size
     // classes alone.
     enable_cache_for_large_classes_only_ = IsExperimentActive(