// node/deps/v8/test/unittests/heap/heap-unittest.cc
// From Node.js PR #54077 ("deps: update V8 to 12.8.374.13"):
// https://github.com/nodejs/node/pull/54077

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/heap.h"
#include <cmath>
#include <iostream>
#include <limits>
#include "include/v8-isolate.h"
#include "include/v8-object.h"
#include "src/flags/flags.h"
#include "src/handles/handles-inl.h"
#include "src/heap/gc-tracer-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/marking-state-inl.h"
#include "src/heap/minor-mark-sweep.h"
#include "src/heap/mutable-page-metadata.h"
#include "src/heap/remembered-set.h"
#include "src/heap/safepoint.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/trusted-range.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/heap/heap-utils.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
using HeapTest = TestWithHeapInternalsAndContext;
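
// kPointerMultiplier and kHeapLimitMultiplier scale the constants below with
// the tagged-pointer and system-pointer sizes, so the same expectations hold
// across 32-bit, 64-bit, and pointer-compressed builds. With --minor-ms the
// young generation is a paged space rather than a pair of semi-spaces, which
// is why the expected sizes differ between the two configurations.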
TEST(Heap, YoungGenerationSizeFromOldGenerationSize) {
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
// Low memory
ASSERT_EQ((v8_flags.minor_ms ? 4 : 3) * 512u * pm * KB,
i::Heap::YoungGenerationSizeFromOldGenerationSize(128u * hlm * MB));
// High memory
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 2u * pm * MB,
i::Heap::YoungGenerationSizeFromOldGenerationSize(256u * hlm * MB));
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 4u * pm * MB,
i::Heap::YoungGenerationSizeFromOldGenerationSize(512u * hlm * MB));
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 8u * pm * MB,
i::Heap::YoungGenerationSizeFromOldGenerationSize(1u * hlm * GB));
}
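
// GenerationSizesFromHeapSize is roughly the inverse mapping: it splits a
// total heap size into old- and young-generation budgets. The cases below
// mirror the clamps exercised by YoungGenerationSizeFromOldGenerationSize.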
TEST(Heap, GenerationSizesFromHeapSize) {
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
size_t old, young;
// Low memory
i::Heap::GenerationSizesFromHeapSize(1 * KB, &young, &old);
ASSERT_EQ(0u, old);
ASSERT_EQ(0u, young);
  // On a tiny heap, the max semi-space capacity is set to the default
  // capacity, which MinorMS does not double.
i::Heap::GenerationSizesFromHeapSize(
1 * KB + (v8_flags.minor_ms ? 2 : 3) * 512u * pm * KB, &young, &old);
ASSERT_EQ(1u * KB, old);
ASSERT_EQ((v8_flags.minor_ms ? 2 : 3) * 512u * pm * KB, young);
i::Heap::GenerationSizesFromHeapSize(
128 * hlm * MB + (v8_flags.minor_ms ? 4 : 3) * 512 * pm * KB, &young,
&old);
ASSERT_EQ(128u * hlm * MB, old);
ASSERT_EQ((v8_flags.minor_ms ? 4 : 3) * 512u * pm * KB, young);
// High memory
i::Heap::GenerationSizesFromHeapSize(
256u * hlm * MB + (v8_flags.minor_ms
? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 2 * pm * MB),
&young, &old);
ASSERT_EQ(256u * hlm * MB, old);
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 2 * pm * MB,
young);
i::Heap::GenerationSizesFromHeapSize(
512u * hlm * MB + (v8_flags.minor_ms
? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 4 * pm * MB),
&young, &old);
ASSERT_EQ(512u * hlm * MB, old);
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 4U * pm * MB,
young);
i::Heap::GenerationSizesFromHeapSize(
1u * hlm * GB + (v8_flags.minor_ms
? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 8 * pm * MB),
&young, &old);
ASSERT_EQ(1u * hlm * GB, old);
ASSERT_EQ(v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 8U * pm * MB,
young);
}
TEST(Heap, HeapSizeFromPhysicalMemory) {
const size_t pm = i::Heap::kPointerMultiplier;
const size_t hlm = i::Heap::kHeapLimitMultiplier;
// The expected value is old_generation_size + semi_space_multiplier *
// semi_space_size.
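  // For example, with pm == hlm == 1 and MinorMS disabled, 1 GB of physical
  // memory maps to a 256 MB old generation plus 3 * 2 MB of young generation,
  // matching the 1 GB case asserted below.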
// Low memory
ASSERT_EQ(128 * hlm * MB + (v8_flags.minor_ms ? 4 : 3) * 512 * pm * KB,
i::Heap::HeapSizeFromPhysicalMemory(0u));
ASSERT_EQ(128 * hlm * MB + (v8_flags.minor_ms ? 4 : 3) * 512 * pm * KB,
i::Heap::HeapSizeFromPhysicalMemory(512u * MB));
// High memory
ASSERT_EQ(256 * hlm * MB + (v8_flags.minor_ms
? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 2 * pm * MB),
i::Heap::HeapSizeFromPhysicalMemory(1u * GB));
ASSERT_EQ(512 * hlm * MB + (v8_flags.minor_ms
? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 4 * pm * MB),
i::Heap::HeapSizeFromPhysicalMemory(2u * GB));
ASSERT_EQ(
1 * hlm * GB + (v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 8 * pm * MB),
i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(4u) * GB));
ASSERT_EQ(
1 * hlm * GB + (v8_flags.minor_ms ? 2 * i::Heap::DefaultMaxSemiSpaceSize()
: 3 * 8 * pm * MB),
i::Heap::HeapSizeFromPhysicalMemory(static_cast<uint64_t>(8u) * GB));
}
TEST_F(HeapTest, ASLR) {
#if V8_TARGET_ARCH_X64
#if V8_OS_DARWIN
Heap* heap = i_isolate()->heap();
std::set<void*> hints;
for (int i = 0; i < 1000; i++) {
hints.insert(heap->GetRandomMmapAddr());
}
if (hints.size() == 1) {
EXPECT_TRUE((*hints.begin()) == nullptr);
EXPECT_TRUE(i::GetRandomMmapAddr() == nullptr);
} else {
    // It is unlikely that 1000 random samples collapse to fewer than 500
    // distinct values.
EXPECT_GT(hints.size(), 500u);
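    // All hints should fall within a single 4 GiB region: any two hints may
    // differ only in their low 32 bits.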
const uintptr_t kRegionMask = 0xFFFFFFFFu;
void* first = *hints.begin();
for (void* hint : hints) {
uintptr_t diff = reinterpret_cast<uintptr_t>(first) ^
reinterpret_cast<uintptr_t>(hint);
EXPECT_LE(diff, kRegionMask);
}
}
#endif // V8_OS_DARWIN
#endif // V8_TARGET_ARCH_X64
}
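
// kExternalAllocationSoftLimit is the initial budget for embedder-managed
// external memory (e.g. ArrayBuffer backing stores) reported through
// AdjustAmountOfExternalAllocatedMemory. The two tests below check that the
// limit starts at this default and never drops below it.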
TEST_F(HeapTest, ExternalLimitDefault) {
Heap* heap = i_isolate()->heap();
EXPECT_EQ(kExternalAllocationSoftLimit, heap->external_memory_limit());
}
TEST_F(HeapTest, ExternalLimitStaysAboveDefaultForExplicitHandling) {
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(+10 * MB);
v8_isolate()->AdjustAmountOfExternalAllocatedMemory(-10 * MB);
Heap* heap = i_isolate()->heap();
EXPECT_GE(heap->external_memory_limit(), kExternalAllocationSoftLimit);
}
#ifdef V8_COMPRESS_POINTERS
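// With pointer compression, regular heap objects live inside a 4 GB cage so
// that compressed pointers can be stored as 32-bit offsets from the cage
// base. This test allocates some garbage and then verifies that every
// old-generation chunk falls inside the reservation it belongs to.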
TEST_F(HeapTest, HeapLayout) {
// Produce some garbage.
RunJS(
"let ar = [];"
"for (let i = 0; i < 100; i++) {"
" ar.push(Array(i));"
"}"
"ar.push(Array(32 * 1024 * 1024));");
Address cage_base = i_isolate()->cage_base();
EXPECT_TRUE(IsAligned(cage_base, size_t{4} * GB));
Address code_cage_base = i_isolate()->code_cage_base();
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
EXPECT_TRUE(IsAligned(code_cage_base, kMinExpectedOSPageSize));
} else {
EXPECT_TRUE(IsAligned(code_cage_base, size_t{4} * GB));
}
#if V8_ENABLE_SANDBOX
Address trusted_space_base =
TrustedRange::GetProcessWideTrustedRange()->base();
EXPECT_TRUE(IsAligned(trusted_space_base, size_t{4} * GB));
base::AddressRegion trusted_reservation(trusted_space_base, size_t{4} * GB);
#endif
  // Check that all memory chunks belong to their respective reservations.
base::AddressRegion heap_reservation(cage_base, size_t{4} * GB);
base::AddressRegion code_reservation(code_cage_base, size_t{4} * GB);
IsolateSafepointScope scope(i_isolate()->heap());
OldGenerationMemoryChunkIterator iter(i_isolate()->heap());
while (MutablePageMetadata* chunk = iter.next()) {
Address address = chunk->ChunkAddress();
size_t size = chunk->area_end() - address;
AllocationSpace owner_id = chunk->owner_identity();
if (V8_EXTERNAL_CODE_SPACE_BOOL && IsAnyCodeSpace(owner_id)) {
EXPECT_TRUE(code_reservation.contains(address, size));
#if V8_ENABLE_SANDBOX
} else if (IsAnyTrustedSpace(owner_id)) {
EXPECT_TRUE(trusted_reservation.contains(address, size));
#endif
} else {
EXPECT_TRUE(heap_reservation.contains(address, size));
}
}
}
#endif // V8_COMPRESS_POINTERS
namespace {
void ShrinkNewSpace(NewSpace* new_space) {
if (!v8_flags.minor_ms) {
SemiSpaceNewSpace::From(new_space)->Shrink();
return;
}
// MinorMS shrinks the space as part of sweeping. Here we fake a GC cycle, in
// which we just shrink without marking or sweeping.
PagedNewSpace* paged_new_space = PagedNewSpace::From(new_space);
Heap* heap = paged_new_space->heap();
heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
GCTracer* tracer = heap->tracer();
tracer->StartObservablePause(base::TimeTicks::Now());
tracer->StartCycle(GarbageCollector::MARK_COMPACTOR,
GarbageCollectionReason::kTesting, "heap unittest",
GCTracer::MarkingType::kAtomic);
tracer->StartAtomicPause();
paged_new_space->StartShrinking();
for (auto it = paged_new_space->begin();
it != paged_new_space->end() &&
(paged_new_space->ShouldReleaseEmptyPage());) {
PageMetadata* page = *it++;
if (page->allocated_bytes() == 0) {
paged_new_space->ReleasePage(page);
} else {
// The number of live bytes should be zero, because at this point we're
// after a GC.
DCHECK_EQ(0, page->live_bytes());
// We set it to the number of allocated bytes, because FinishShrinking
// below expects that all pages have been swept and those that remain
// contain live bytes.
page->SetLiveBytes(page->allocated_bytes());
}
}
paged_new_space->FinishShrinking();
for (PageMetadata* page : *paged_new_space) {
// We reset the number of live bytes to zero, as is expected after a GC.
page->SetLiveBytes(0);
}
tracer->StopAtomicPause();
tracer->StopObservablePause(GarbageCollector::MARK_COMPACTOR,
base::TimeTicks::Now());
tracer->NotifyFullSweepingCompleted();
}
} // namespace
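
// Growing the new space should double its capacity and shrinking should halve
// it, except where live pages prevent release. The test below exercises both
// directions around a full and an empty new space.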
TEST_F(HeapTest, GrowAndShrinkNewSpace) {
if (v8_flags.single_generation) return;
{
ManualGCScope manual_gc_scope(i_isolate());
// Avoid shrinking new space in GC epilogue. This can happen if allocation
// throughput samples have been taken while executing the benchmark.
v8_flags.predictable = true;
v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
}
NewSpace* new_space = heap()->new_space();
if (heap()->MaxSemiSpaceSize() == heap()->InitialSemiSpaceSize()) {
return;
}
// Make sure we're in a consistent state to start out.
InvokeMajorGC();
InvokeMajorGC();
ShrinkNewSpace(new_space);
// Explicitly growing should double the space capacity.
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
old_capacity = new_space->TotalCapacity();
{
v8::HandleScope temporary_scope(reinterpret_cast<v8::Isolate*>(isolate()));
SimulateFullSpace(new_space);
}
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Explicitly shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
ShrinkNewSpace(new_space);
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
// Let the scavenger empty the new space.
EmptyNewSpaceUsingGC();
CHECK_LE(new_space->Size(), old_capacity);
// Explicitly shrinking should halve the space capacity.
old_capacity = new_space->TotalCapacity();
ShrinkNewSpace(new_space);
new_capacity = new_space->TotalCapacity();
if (v8_flags.minor_ms) {
// Shrinking may not be able to remove any pages if all contain live
// objects.
CHECK_GE(old_capacity, new_capacity);
} else {
CHECK_EQ(old_capacity, 2 * new_capacity);
}
// Consecutive shrinking should not affect space capacity.
old_capacity = new_space->TotalCapacity();
ShrinkNewSpace(new_space);
ShrinkNewSpace(new_space);
ShrinkNewSpace(new_space);
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
TEST_F(HeapTest, CollectingAllAvailableGarbageShrinksNewSpace) {
if (v8_flags.single_generation) return;
v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
if (heap()->MaxSemiSpaceSize() == heap()->InitialSemiSpaceSize()) {
return;
}
v8::Isolate* iso = reinterpret_cast<v8::Isolate*>(isolate());
v8::HandleScope scope(iso);
NewSpace* new_space = heap()->new_space();
size_t old_capacity, new_capacity;
old_capacity = new_space->TotalCapacity();
GrowNewSpace();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(2 * old_capacity, new_capacity);
{
v8::HandleScope temporary_scope(iso);
SimulateFullSpace(new_space);
}
InvokeMemoryReducingMajorGCs();
new_capacity = new_space->TotalCapacity();
CHECK_EQ(old_capacity, new_capacity);
}
// Test that inlined allocation in optimized code (historically the
// HAllocateObject instruction) always returns an object in new space.
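// The test relies on --allow-natives-syntax intrinsics:
// %PrepareFunctionForOptimization and %OptimizeFunctionOnNextCall push 'f'
// through the optimizing compiler, so the allocation of 'c' instances happens
// in optimized code.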
TEST_F(HeapTest, OptimizedAllocationAlwaysInNewSpace) {
if (v8_flags.single_generation) return;
v8_flags.allow_natives_syntax = true;
v8_flags.stress_concurrent_allocation = false; // For SimulateFullSpace.
if (!isolate()->use_optimizer() || v8_flags.always_turbofan) return;
if (v8_flags.gc_global || v8_flags.stress_compaction ||
v8_flags.stress_incremental_marking)
return;
v8::Isolate* iso = reinterpret_cast<v8::Isolate*>(isolate());
v8::HandleScope scope(iso);
v8::Local<v8::Context> ctx = iso->GetCurrentContext();
SimulateFullSpace(heap()->new_space());
AlwaysAllocateScopeForTesting always_allocate(heap());
v8::Local<v8::Value> res = WithIsolateScopeMixin::RunJS(
"function c(x) {"
" this.x = x;"
" for (var i = 0; i < 32; i++) {"
" this['x' + i] = x;"
" }"
"}"
"function f(x) { return new c(x); };"
"%PrepareFunctionForOptimization(f);"
"f(1); f(2); f(3);"
"%OptimizeFunctionOnNextCall(f);"
"f(4);");
CHECK_EQ(4, res.As<v8::Object>()
->GetRealNamedProperty(ctx, NewString("x"))
.ToLocalChecked()
->Int32Value(ctx)
.FromJust());
i::DirectHandle<JSReceiver> o =
v8::Utils::OpenDirectHandle(*v8::Local<v8::Object>::Cast(res));
CHECK(Heap::InYoungGeneration(*o));
}
namespace {
template <RememberedSetType direction>
static size_t GetRememberedSetSize(Tagged<HeapObject> obj) {
size_t count = 0;
auto chunk = MutablePageMetadata::FromHeapObject(obj);
RememberedSet<direction>::Iterate(
chunk,
[&count](MaybeObjectSlot slot) {
count++;
return KEEP_SLOT;
},
SlotSet::KEEP_EMPTY_BUCKETS);
return count;
}
} // namespace
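
// Remembered sets record old-to-new references so that a minor GC can update
// them without scanning the whole old generation. Which set a slot lands in
// depends on who recorded it: OLD_TO_NEW_BACKGROUND is populated from
// background threads (the MinorMS case here), OLD_TO_NEW from the main thread.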
TEST_F(HeapTest, RememberedSet_InsertOnPromotingObjectToOld) {
if (v8_flags.single_generation || v8_flags.stress_incremental_marking) return;
v8_flags.stress_concurrent_allocation = false; // For SealCurrentObjects.
Factory* factory = isolate()->factory();
Heap* heap = isolate()->heap();
SealCurrentObjects();
HandleScope scope(isolate());
// Create a young object and age it one generation inside the new space.
DirectHandle<FixedArray> arr = factory->NewFixedArray(1);
std::vector<Handle<FixedArray>> handles;
if (v8_flags.minor_ms) {
NewSpace* new_space = heap->new_space();
CHECK_NE(new_space->TotalCapacity(), new_space->MaximumCapacity());
// Fill current pages to force MinorMS to promote them.
SimulateFullSpace(new_space, &handles);
IsolateSafepointScope scope(heap);
// New empty pages should remain in new space.
new_space->Grow();
CHECK(new_space->EnsureCurrentCapacity());
}
InvokeMinorGC();
CHECK(Heap::InYoungGeneration(*arr));
// Add into 'arr' a reference to an object one generation younger.
{
HandleScope scope_inner(isolate());
DirectHandle<Object> number = factory->NewHeapNumber(42);
arr->set(0, *number);
}
  // Promote 'arr' into old space; its element remains in new space. The
  // old-to-new references are inserted into the remembered set during GC.
InvokeMinorGC();
heap->EnsureSweepingCompleted(Heap::SweepingForcedFinalizationMode::kV8Only);
CHECK(heap->InOldSpace(*arr));
CHECK(heap->InYoungGeneration(arr->get(0)));
if (v8_flags.minor_ms) {
CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW_BACKGROUND>(*arr));
} else {
CHECK_EQ(1, GetRememberedSetSize<OLD_TO_NEW>(*arr));
}
}
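
// Regression test: right-trimming a FixedArray leaves a one-word filler at
// the very end of a page, and marking that filler used to read past the end
// of the page's marking bitmap.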
TEST_F(HeapTest, Regress978156) {
if (!v8_flags.incremental_marking) return;
if (v8_flags.single_generation) return;
ManualGCScope manual_gc_scope(isolate());
HandleScope handle_scope(isolate());
Heap* heap = isolate()->heap();
// 1. Ensure that the new space is empty.
EmptyNewSpaceUsingGC();
// 2. Fill the new space with FixedArrays.
std::vector<Handle<FixedArray>> arrays;
SimulateFullSpace(heap->new_space(), &arrays);
// 3. Trim the last array by one word thus creating a one-word filler.
DirectHandle<FixedArray> last = arrays.back();
CHECK_GT(last->length(), 0);
heap->RightTrimArray(*last, last->length() - 1, last->length());
// 4. Get the last filler on the page.
Tagged<HeapObject> filler = HeapObject::FromAddress(
MutablePageMetadata::FromHeapObject(*last)->area_end() - kTaggedSize);
CHECK(IsFiller(filler));
// 5. Start incremental marking.
i::IncrementalMarking* marking = heap->incremental_marking();
if (marking->IsStopped()) {
IsolateSafepointScope scope(heap);
heap->tracer()->StartCycle(
GarbageCollector::MARK_COMPACTOR, GarbageCollectionReason::kTesting,
"collector cctest", GCTracer::MarkingType::kIncremental);
marking->Start(GarbageCollector::MARK_COMPACTOR,
i::GarbageCollectionReason::kTesting);
}
  // 6. Mark the filler black to access its two markbits. In the buggy case,
  // this triggered an out-of-bounds access of the marking bitmap.
heap->marking_state()->TryMarkAndAccountLiveBytes(filler);
}
#ifdef V8_ENABLE_ALLOCATION_TIMEOUT
namespace {
struct RandomGCIntervalTestSetter {
RandomGCIntervalTestSetter() {
static constexpr int kInterval = 87;
v8_flags.random_gc_interval = kInterval;
}
~RandomGCIntervalTestSetter() { v8_flags.random_gc_interval = 0; }
};
struct HeapTestWithRandomGCInterval : RandomGCIntervalTestSetter, HeapTest {};
} // namespace
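
// With V8_ENABLE_ALLOCATION_TIMEOUT, --random-gc-interval arms a countdown on
// the heap allocator: once the (randomized, bounded by the interval) timeout
// is exhausted, the next raw allocation fails so that a GC can be forced. The
// fixture pins the interval via RandomGCIntervalTestSetter and the test then
// checks that the countdown is honored exactly.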
TEST_F(HeapTestWithRandomGCInterval, AllocationTimeout) {
if (v8_flags.stress_incremental_marking) return;
if (v8_flags.stress_concurrent_allocation) return;
auto* allocator = heap()->allocator();
// Invoke major GC to cause the timeout to be updated.
InvokeMajorGC();
const int initial_allocation_timeout =
allocator->get_allocation_timeout_for_testing().value_or(0);
ASSERT_GT(initial_allocation_timeout, 0);
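  // Every allocation before the countdown expires must succeed.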
for (int i = 0; i < initial_allocation_timeout - 1; ++i) {
AllocationResult allocation = allocator->AllocateRaw(
2 * kTaggedAligned, AllocationType::kYoung, AllocationOrigin::kRuntime);
EXPECT_FALSE(allocation.IsFailure());
}
// The last allocation must fail.
AllocationResult allocation = allocator->AllocateRaw(
2 * kTaggedAligned, AllocationType::kYoung, AllocationOrigin::kRuntime);
EXPECT_TRUE(allocation.IsFailure());
}
#endif // V8_ENABLE_ALLOCATION_TIMEOUT
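
// Regression test: when a minor GC promotes a JSArrayBuffer, the buffer's
// external pointer table entry must be promoted from the young to the old
// table along with it; see the step-by-step comments below.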
TEST_F(HeapTest, Regress341769455) {
#ifdef V8_COMPRESS_POINTERS
if (!v8_flags.incremental_marking) return;
if (!v8_flags.minor_ms) return;
Isolate* iso = isolate();
Heap* heap = iso->heap();
HandleScope outer(iso);
Handle<JSArrayBuffer> ab;
{
ManualGCScope manual_gc_scope(isolate());
    // Make sure the new space is empty.
InvokeAtomicMajorGC();
ab = iso->factory()
->NewJSArrayBufferAndBackingStore(
1, InitializedFlag::kZeroInitialized, AllocationType::kYoung)
.ToHandleChecked();
    // Reset the EPT (ExternalPointerTable) handle to null.
ab->init_extension();
// MinorMS promotes pages that haven't been allocated on since the last GC.
// Force a minor GC to reset the counter of bytes allocated on the page.
InvokeAtomicMinorGC();
// Set up a global to make sure the JSArrayBuffer is visited before the
// atomic pause.
Global<JSArrayBuffer> global(
v8_isolate(), Utils::Convert<JSArrayBuffer, JSArrayBuffer>(ab));
CHECK_EQ(Heap::HeapState::NOT_IN_GC, heap->gc_state());
CHECK(heap->incremental_marking()->IsStopped());
// Start incremental marking such that setting an extension (via
// `EnsureExtension`) triggers a write barrier.
v8_flags.incremental_marking = true;
heap->StartIncrementalMarking(GCFlag::kNoFlags,
GarbageCollectionReason::kTesting,
GCCallbackFlags::kNoGCCallbackFlags,
GarbageCollector::MINOR_MARK_SWEEPER);
CHECK(heap->incremental_marking()->IsMinorMarking());
heap->minor_mark_sweep_collector()->DrainMarkingWorklistForTesting();
ab->EnsureExtension();
heap->AppendArrayBufferExtension(*ab, ab->extension());
}
// Trigger a 2nd minor GC to promote the JSArrayBuffer to old space.
CHECK(Heap::InYoungGeneration(*ab));
InvokeAtomicMinorGC();
CHECK(!Heap::InYoungGeneration(*ab));
  // If the EPT entry for the JSArrayBuffer wasn't promoted to the old table,
  // a 3rd minor GC will observe it as unmarked (since the owning object is
  // old) and free it. The major GC after that will then crash when trying to
  // access the extension of the JSArrayBuffer even though the entry has been
  // freed.
InvokeAtomicMinorGC();
InvokeAtomicMajorGC();
#endif // V8_COMPRESS_POINTERS
}
} // namespace internal
} // namespace v8