From 937ad19afaa59011cfd465136e793f404b20e051 Mon Sep 17 00:00:00 2001
From: Goetz Lindenmaier
Date: Thu, 11 Dec 2025 14:08:17 +0100
Subject: [PATCH] backport 53e7ea891d8c3d91340bf1967aa94104f54b467c

---
 src/hotspot/share/gc/g1/g1CollectedHeap.cpp | 23 ++++++++++++++-------
 src/hotspot/share/gc/g1/g1CollectedHeap.hpp | 20 ++++++++----------
 2 files changed, 24 insertions(+), 19 deletions(-)

diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
index 8acbaae9bb5..f63889e18e2 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -360,22 +360,26 @@ HeapWord* G1CollectedHeap::allocate_new_tlab(size_t min_size,
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!is_humongous(requested_size), "we do not allow humongous TLABs");
 
-  return attempt_allocation(min_size, requested_size, actual_size);
+  // Do not allow a GC because we are allocating a new TLAB, to avoid an issue
+  // with UseGCOverheadLimit: although this GC would return null if the overhead
+  // limit were exceeded, it would likely still free some space. So the
+  // subsequent outside-TLAB allocation could succeed anyway and the indication
+  // that the overhead limit had been exceeded would be swallowed.
+  return attempt_allocation(min_size, requested_size, actual_size, false /* allow_gc */);
 }
 
-HeapWord*
-G1CollectedHeap::mem_allocate(size_t word_size,
-                              bool* gc_overhead_limit_was_exceeded) {
+HeapWord* G1CollectedHeap::mem_allocate(size_t word_size,
+                                        bool* gc_overhead_limit_was_exceeded) {
   assert_heap_not_locked_and_not_at_safepoint();
 
   if (is_humongous(word_size)) {
     return attempt_allocation_humongous(word_size);
   }
   size_t dummy = 0;
-  return attempt_allocation(word_size, word_size, &dummy);
+  return attempt_allocation(word_size, word_size, &dummy, true /* allow_gc */);
 }
 
-HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
+HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size, bool allow_gc) {
   ResourceMark rm; // For retrieving the thread names in log messages.
 
   // Make sure you read the note in attempt_allocation_humongous().
@@ -448,6 +452,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
       log_trace(gc, alloc)("%s: Successfully scheduled collection returning " PTR_FORMAT,
                            Thread::current()->name(), p2i(result));
       return result;
+    } else if (!allow_gc) {
+      return nullptr;
     }
 
     if (succeeded) {
@@ -706,7 +712,8 @@ void G1CollectedHeap::fill_archive_regions(MemRegion* ranges, size_t count) {
 
 inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
                                                      size_t desired_word_size,
-                                                     size_t* actual_word_size) {
+                                                     size_t* actual_word_size,
+                                                     bool allow_gc) {
   assert_heap_not_locked_and_not_at_safepoint();
   assert(!is_humongous(desired_word_size), "attempt_allocation() should not "
          "be called for humongous allocation requests");
@@ -715,7 +722,7 @@ inline HeapWord* G1CollectedHeap::attempt_allocation(size_t min_word_size,
 
   if (result == NULL) {
     *actual_word_size = desired_word_size;
-    result = attempt_allocation_slow(desired_word_size);
+    result = attempt_allocation_slow(desired_word_size, allow_gc);
   }
 
   assert_heap_not_locked();
diff --git a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
index 50aaac4cd53..ccd1542b4d1 100644
--- a/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
+++ b/src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -439,18 +439,14 @@ class G1CollectedHeap : public CollectedHeap {
   //
   // * If either call cannot satisfy the allocation request using the
   //   current allocating region, they will try to get a new one. If
-  //   this fails, they will attempt to do an evacuation pause and
-  //   retry the allocation.
-  //
-  // * If all allocation attempts fail, even after trying to schedule
-  //   an evacuation pause, allocate_new_tlab() will return NULL,
-  //   whereas mem_allocate() will attempt a heap expansion and/or
-  //   schedule a Full GC.
+  //   this fails, (only) mem_allocate() will attempt to do an evacuation
+  //   pause and retry the allocation; allocate_new_tlab() will return null,
+  //   deferring to the following mem_allocate().
   //
   // * We do not allow humongous-sized TLABs. So, allocate_new_tlab
   //   should never be called with word_size being humongous. All
   //   humongous allocation requests should go to mem_allocate() which
-  //   will satisfy them with a special path.
+  //   will satisfy them in a special path.
 
   virtual HeapWord* allocate_new_tlab(size_t min_size,
                                       size_t requested_size,
@@ -464,12 +460,14 @@
   // should only be used for non-humongous allocations.
   inline HeapWord* attempt_allocation(size_t min_word_size,
                                       size_t desired_word_size,
-                                      size_t* actual_word_size);
+                                      size_t* actual_word_size,
+                                      bool allow_gc);
 
   // Second-level mutator allocation attempt: take the Heap_lock and
   // retry the allocation attempt, potentially scheduling a GC
-  // pause. This should only be used for non-humongous allocations.
-  HeapWord* attempt_allocation_slow(size_t word_size);
+  // pause if allow_gc is set. This should only be used for non-humongous
+  // allocations.
+  HeapWord* attempt_allocation_slow(size_t word_size, bool allow_gc);
 
   // Takes the Heap_lock and attempts a humongous allocation. It can
   // potentially schedule a GC pause.