#include "src/heap/spaces.h"

#include "src/base/bits.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/base/template-utils.h"
#include "src/counters.h"
#include "src/heap/array-buffer-tracker.h"
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-controller.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/remembered-set.h"
#include "src/heap/slot-set.h"
#include "src/heap/sweeper.h"
#include "src/objects-inl.h"
#include "src/objects/js-array-buffer-inl.h"
#include "src/objects/js-array-inl.h"
#include "src/snapshot/snapshot.h"
#include "src/vm-state-inl.h"

// A cleared weak reference must not look like a valid heap object pointer:
// its lower 32 bits stay below the page header sizes, where no object can
// ever start.
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < LargePage::kHeaderSize);

HeapObjectIterator::HeapObjectIterator(PagedSpace* space)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(space),
      page_range_(space->first_page(), nullptr),
      current_page_(page_range_.begin()) {}

HeapObjectIterator::HeapObjectIterator(Page* page)
    : cur_addr_(kNullAddress),
      cur_end_(kNullAddress),
      space_(reinterpret_cast<PagedSpace*>(page->owner())),
      page_range_(page),
      current_page_(page_range_.begin()) {
  Space* owner = page->owner();
  DCHECK(owner == page->heap()->old_space() ||
         owner == page->heap()->map_space() ||
         owner == page->heap()->code_space() ||
         owner == page->heap()->read_only_space());
}
// We have hit the end of the current page's objects; advance to the next
// page that can be iterated.
bool HeapObjectIterator::AdvanceToNextPage() {
  DCHECK_EQ(cur_addr_, cur_end_);
  if (current_page_ == page_range_.end()) return false;
  Page* cur_page = *(current_page_++);
  Heap* heap = space_->heap();

  heap->mark_compact_collector()->sweeper()->EnsurePageIsIterable(cur_page);
#ifdef ENABLE_MINOR_MC
  if (cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE))
    heap->minor_mark_compact_collector()->MakeIterable(
        cur_page, MarkingTreatmentMode::CLEAR,
        FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
  DCHECK(!cur_page->IsFlagSet(Page::SWEEP_TO_ITERATE));
#endif  // ENABLE_MINOR_MC
  cur_addr_ = cur_page->area_start();
  cur_end_ = cur_page->area_end();
  DCHECK(cur_page->SweepingDone());
  return true;
}

PauseAllocationObserversScope::PauseAllocationObserversScope(Heap* heap)
    : heap_(heap) {
  DCHECK_EQ(heap->gc_state(), Heap::NOT_IN_GC);

  for (SpaceIterator it(heap_); it.has_next();) {
    it.next()->PauseAllocationObservers();
  }
}

PauseAllocationObserversScope::~PauseAllocationObserversScope() {
  for (SpaceIterator it(heap_); it.has_next();) {
    it.next()->ResumeAllocationObservers();
  }
}
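// The CodeRangeAddressHint below is a process-wide cache of recently freed
// code-range start addresses, keyed by the requested size. Reusing a freed
// address as the next reservation hint keeps code ranges clustered in the
// address space; if nothing suitable is cached, a random mmap address is
// used instead.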
static base::LazyInstance<CodeRangeAddressHint>::type code_range_address_hint =
    LAZY_INSTANCE_INITIALIZER;

Address CodeRangeAddressHint::GetAddressHint(size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  auto it = recently_freed_.find(code_range_size);
  if (it == recently_freed_.end() || it->second.empty()) {
    return reinterpret_cast<Address>(GetRandomMmapAddr());
  }
  Address result = it->second.back();
  it->second.pop_back();
  return result;
}

void CodeRangeAddressHint::NotifyFreedCodeRange(Address code_range_start,
                                                size_t code_range_size) {
  base::MutexGuard guard(&mutex_);
  recently_freed_[code_range_size].push_back(code_range_start);
}

MemoryAllocator::MemoryAllocator(Isolate* isolate, size_t capacity,
                                 size_t code_range_size)
    : isolate_(isolate),
      data_page_allocator_(isolate->page_allocator()),
      code_page_allocator_(nullptr),
      capacity_(RoundUp(capacity, Page::kPageSize)),
      size_(0),
      size_executable_(0),
      lowest_ever_allocated_(static_cast<Address>(-1ll)),
      highest_ever_allocated_(kNullAddress),
      unmapper_(isolate->heap(), this) {
  InitializeCodePageAllocator(data_page_allocator_, code_range_size);
}
void MemoryAllocator::InitializeCodePageAllocator(
    v8::PageAllocator* page_allocator, size_t requested) {
  DCHECK_NULL(code_page_allocator_instance_.get());

  code_page_allocator_ = page_allocator;

  if (requested == 0) {
    if (!kRequiresCodeRange) return;
    // When a target requires the code range feature, we put all code objects
    // in a contiguous range of virtual address space, so that they can call
    // each other with near calls.
    requested = kMaximalCodeRangeSize;
  } else if (requested <= kMinimumCodeRangeSize) {
    requested = kMinimumCodeRangeSize;
  }

  const size_t reserved_area =
      kReservedCodeRangePages * MemoryAllocator::GetCommitPageSize();
  if (requested < (kMaximalCodeRangeSize - reserved_area)) {
    requested += RoundUp(reserved_area, MemoryChunk::kPageSize);
  }
  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);

  Address hint =
      RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested),
                page_allocator->AllocatePageSize());
  VirtualMemory reservation(
      page_allocator, requested, reinterpret_cast<void*>(hint),
      Max(kMinExpectedOSPageSize, page_allocator->AllocatePageSize()));
  if (!reservation.IsReserved()) {
    V8::FatalProcessOutOfMemory(isolate_,
                                "CodeRange setup: allocate virtual memory");
  }
  code_range_ = reservation.region();

  // We are sure that we have mapped a block of requested addresses.
  DCHECK_GE(reservation.size(), requested);
  Address base = reservation.address();

  // On some platforms, specifically Win64, we need to reserve some pages at
  // the beginning of an executable space.
  if (reserved_area > 0) {
    if (!reservation.SetPermissions(base, reserved_area,
                                    PageAllocator::kReadWrite))
      V8::FatalProcessOutOfMemory(isolate_, "CodeRange setup: set permissions");

    base += reserved_area;
  }
  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);
  size_t size =
      RoundDown(reservation.size() - (aligned_base - base) - reserved_area,
                MemoryChunk::kPageSize);
  DCHECK(IsAligned(aligned_base, kMinExpectedOSPageSize));

  LOG(isolate_,
      NewEvent("CodeRange", reinterpret_cast<void*>(reservation.address()),
               requested));

  heap_reservation_.TakeControl(&reservation);
  code_page_allocator_instance_ = base::make_unique<base::BoundedPageAllocator>(
      page_allocator, aligned_base, size,
      static_cast<size_t>(MemoryChunk::kAlignment));
  code_page_allocator_ = code_page_allocator_instance_.get();
}
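// Note on the reservation set up above: the reserved_area pages at the start
// (if any) are made read-write and left out of the BoundedPageAllocator, so
// every code page handed out later comes from the aligned
// [aligned_base, aligned_base + size) region and therefore stays inside
// code_range_.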
void MemoryAllocator::TearDown() {
  unmapper()->TearDown();

  // No chunks should be left over after all spaces have been torn down.
  DCHECK_EQ(size_, 0u);

  if (last_chunk_.IsReserved()) {
    last_chunk_.Free();
  }

  if (code_page_allocator_instance_.get()) {
    DCHECK(!code_range_.is_empty());
    code_range_address_hint.Pointer()->NotifyFreedCodeRange(code_range_.begin(),
                                                            code_range_.size());
    code_range_ = base::AddressRegion();
    code_page_allocator_instance_.reset();
  }
  code_page_allocator_ = nullptr;
  data_page_allocator_ = nullptr;
}

class MemoryAllocator::Unmapper::UnmapFreeMemoryTask : public CancelableTask {
 public:
  explicit UnmapFreeMemoryTask(Isolate* isolate, Unmapper* unmapper)
      : CancelableTask(isolate),
        unmapper_(unmapper),
        tracer_(isolate->heap()->tracer()) {}

 private:
  void RunInternal() override {
    TRACE_BACKGROUND_GC(tracer_,
                        GCTracer::BackgroundScope::BACKGROUND_UNMAPPER);
    unmapper_->PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
    unmapper_->active_unmapping_tasks_--;
    unmapper_->pending_unmapping_tasks_semaphore_.Signal();
    if (FLAG_trace_unmapper) {
      PrintIsolate(unmapper_->heap_->isolate(),
                   "UnmapFreeMemoryTask Done: id=%" PRIu64 "\n", id());
    }
  }

  Unmapper* const unmapper_;
  GCTracer* const tracer_;
};
void MemoryAllocator::Unmapper::FreeQueuedChunks() {
  if (!heap_->IsTearingDown() && FLAG_concurrent_sweeping) {
    if (!MakeRoomForNewTasks()) {
      // kMaxUnmapperTasks are already running. Avoid creating any more.
      if (FLAG_trace_unmapper) {
        PrintIsolate(heap_->isolate(),
                     "Unmapper::FreeQueuedChunks: reached task limit (%d)\n",
                     kMaxUnmapperTasks);
      }
      return;
    }
    auto task = base::make_unique<UnmapFreeMemoryTask>(heap_->isolate(), this);
    if (FLAG_trace_unmapper) {
      PrintIsolate(heap_->isolate(),
                   "Unmapper::FreeQueuedChunks: new task id=%" PRIu64 "\n",
                   task->id());
    }
    DCHECK_LT(pending_unmapping_tasks_, kMaxUnmapperTasks);
    DCHECK_LE(active_unmapping_tasks_, pending_unmapping_tasks_);
    DCHECK_GE(active_unmapping_tasks_, 0);
    active_unmapping_tasks_++;
    task_ids_[pending_unmapping_tasks_++] = task->id();
    V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
  } else {
    PerformFreeMemoryOnQueuedChunks<FreeMode::kUncommitPooled>();
  }
}

void MemoryAllocator::Unmapper::CancelAndWaitForPendingTasks() {
  for (int i = 0; i < pending_unmapping_tasks_; i++) {
    if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
        TryAbortResult::kTaskAborted) {
      pending_unmapping_tasks_semaphore_.Wait();
    }
  }
  pending_unmapping_tasks_ = 0;
  active_unmapping_tasks_ = 0;

  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::CancelAndWaitForPendingTasks: no tasks remaining\n");
  }
}

void MemoryAllocator::Unmapper::PrepareForMarkCompact() {
  CancelAndWaitForPendingTasks();
  // Free non-regular chunks because they cannot be re-used.
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::EnsureUnmappingCompleted() {
  CancelAndWaitForPendingTasks();
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
}

bool MemoryAllocator::Unmapper::MakeRoomForNewTasks() {
  DCHECK_LE(pending_unmapping_tasks_, kMaxUnmapperTasks);

  if (active_unmapping_tasks_ == 0 && pending_unmapping_tasks_ > 0) {
    // All previous unmapping tasks have been run to completion; finalize
    // them so their slots can be reused.
    CancelAndWaitForPendingTasks();
  }
  return pending_unmapping_tasks_ != kMaxUnmapperTasks;
}
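// The Unmapper keeps separate queues for kRegular, kNonRegular and kPooled
// chunks. Regular page-sized chunks flagged POOLED are merely uncommitted and
// moved to the pooled queue so AllocatePagePooled() can reuse them;
// kReleasePooled additionally frees that pool, while non-regular chunks are
// always released outright.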
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedNonRegularChunks() {
  MemoryChunk* chunk = nullptr;
  while ((chunk = GetMemoryChunkSafe<kNonRegular>()) != nullptr) {
    allocator_->PerformFreeMemory(chunk);
  }
}

template <MemoryAllocator::Unmapper::FreeMode mode>
void MemoryAllocator::Unmapper::PerformFreeMemoryOnQueuedChunks() {
  MemoryChunk* chunk = nullptr;
  if (FLAG_trace_unmapper) {
    PrintIsolate(
        heap_->isolate(),
        "Unmapper::PerformFreeMemoryOnQueuedChunks: %d queued chunks\n",
        NumberOfChunks());
  }
  // Regular chunks.
  while ((chunk = GetMemoryChunkSafe<kRegular>()) != nullptr) {
    bool pooled = chunk->IsFlagSet(MemoryChunk::POOLED);
    allocator_->PerformFreeMemory(chunk);
    if (pooled) AddMemoryChunkSafe<kPooled>(chunk);
  }
  if (mode == MemoryAllocator::Unmapper::FreeMode::kReleasePooled) {
    // The previous loop uncommitted any pages marked as pooled and added them
    // to the pooled list. In case of kReleasePooled we need to free them as
    // well.
    while ((chunk = GetMemoryChunkSafe<kPooled>()) != nullptr) {
      allocator_->Free<MemoryAllocator::kAlreadyPooled>(chunk);
    }
  }
  PerformFreeMemoryOnQueuedNonRegularChunks();
}

void MemoryAllocator::Unmapper::TearDown() {
  CHECK_EQ(0, pending_unmapping_tasks_);
  PerformFreeMemoryOnQueuedChunks<FreeMode::kReleasePooled>();
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    DCHECK(chunks_[i].empty());
  }
}

size_t MemoryAllocator::Unmapper::NumberOfCommittedChunks() {
  base::MutexGuard guard(&mutex_);
  return chunks_[kRegular].size() + chunks_[kNonRegular].size();
}

int MemoryAllocator::Unmapper::NumberOfChunks() {
  base::MutexGuard guard(&mutex_);
  size_t result = 0;
  for (int i = 0; i < kNumberOfChunkQueues; i++) {
    result += chunks_[i].size();
  }
  return static_cast<int>(result);
}

size_t MemoryAllocator::Unmapper::CommittedBufferedMemory() {
  base::MutexGuard guard(&mutex_);
  size_t sum = 0;
  // Pooled chunks are uncommitted, so only the regular and non-regular queues
  // contribute committed memory.
  for (auto& chunk : chunks_[kRegular]) {
    sum += chunk->size();
  }
  for (auto& chunk : chunks_[kNonRegular]) {
    sum += chunk->size();
  }
  return sum;
}
bool MemoryAllocator::CommitMemory(VirtualMemory* reservation) {
  Address base = reservation->address();
  size_t size = reservation->size();
  if (!reservation->SetPermissions(base, size, PageAllocator::kReadWrite)) {
    return false;
  }
  UpdateAllocatedSpaceLimits(base, base + size);
  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
  return true;
}

bool MemoryAllocator::UncommitMemory(VirtualMemory* reservation) {
  size_t size = reservation->size();
  if (!reservation->SetPermissions(reservation->address(), size,
                                   PageAllocator::kNoAccess)) {
    return false;
  }
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  return true;
}

void MemoryAllocator::FreeMemory(v8::PageAllocator* page_allocator,
                                 Address base, size_t size) {
  CHECK(FreePages(page_allocator, reinterpret_cast<void*>(base), size));
}

Address MemoryAllocator::AllocateAlignedMemory(
    size_t reserve_size, size_t commit_size, size_t alignment,
    Executability executable, void* hint, VirtualMemory* controller) {
  v8::PageAllocator* page_allocator = this->page_allocator(executable);
  DCHECK(commit_size <= reserve_size);
  VirtualMemory reservation(page_allocator, reserve_size, hint, alignment);
  if (!reservation.IsReserved()) return kNullAddress;
  Address base = reservation.address();
  size_ += reservation.size();

  if (executable == EXECUTABLE) {
    if (!CommitExecutableMemory(&reservation, base, commit_size,
                                reserve_size)) {
      base = kNullAddress;
    }
  } else {
    if (reservation.SetPermissions(base, commit_size,
                                   PageAllocator::kReadWrite)) {
      UpdateAllocatedSpaceLimits(base, base + commit_size);
    } else {
      base = kNullAddress;
    }
  }

  if (base == kNullAddress) {
    // Failed to commit the body. Free the whole mapping again and undo the
    // accounting done above.
    reservation.Free();
    size_ -= reserve_size;
    return kNullAddress;
  }

  controller->TakeControl(&reservation);
  return base;
}
void MemoryChunk::DiscardUnusedMemory(Address addr, size_t size) {
  base::AddressRegion memory_area =
      MemoryAllocator::ComputeDiscardMemoryArea(addr, size);
  if (memory_area.size() != 0) {
    MemoryAllocator* memory_allocator = heap_->memory_allocator();
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(executable());
    CHECK(page_allocator->DiscardSystemPages(
        reinterpret_cast<void*>(memory_area.begin()), memory_area.size()));
  }
}

size_t MemoryChunkLayout::CodePageGuardStartOffset() {
  // We are guarding code pages: the first OS page after the header
  // will be protected as non-writable.
  return ::RoundUp(Page::kHeaderSize, MemoryAllocator::GetCommitPageSize());
}

size_t MemoryChunkLayout::CodePageGuardSize() {
  return MemoryAllocator::GetCommitPageSize();
}

intptr_t MemoryChunkLayout::ObjectStartOffsetInCodePage() {
  // Objects on a code page start after the header and the leading guard page.
  return CodePageGuardStartOffset() + CodePageGuardSize();
}

intptr_t MemoryChunkLayout::ObjectEndOffsetInCodePage() {
  // We are guarding code pages: the last OS page will be protected as
  // non-writable.
  return Page::kPageSize -
         static_cast<int>(MemoryAllocator::GetCommitPageSize());
}

size_t MemoryChunkLayout::AllocatableMemoryInCodePage() {
  size_t memory = ObjectEndOffsetInCodePage() - ObjectStartOffsetInCodePage();
  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
  return memory;
}

intptr_t MemoryChunkLayout::ObjectStartOffsetInDataPage() {
  return MemoryChunk::kHeaderSize +
         (kPointerSize - MemoryChunk::kHeaderSize % kPointerSize);
}

size_t MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(
    AllocationSpace space) {
  if (space == CODE_SPACE) {
    return ObjectStartOffsetInCodePage();
  }
  return ObjectStartOffsetInDataPage();
}

size_t MemoryChunkLayout::AllocatableMemoryInDataPage() {
  size_t memory = MemoryChunk::kPageSize - ObjectStartOffsetInDataPage();
  DCHECK_LE(kMaxRegularHeapObjectSize, memory);
  return memory;
}

size_t MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
    AllocationSpace space) {
  if (space == CODE_SPACE) {
    return AllocatableMemoryInCodePage();
  }
  return AllocatableMemoryInDataPage();
}
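// Layout summary implied by the helpers above: data pages place objects right
// after the MemoryChunk header, rounded up to pointer alignment, while code
// pages additionally carry a guard page after the header and another at the
// end of the page, which is why their allocatable area is smaller.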
Heap* MemoryChunk::synchronized_heap() {
  return reinterpret_cast<Heap*>(
      base::Acquire_Load(reinterpret_cast<base::AtomicWord*>(&heap_)));
}

void MemoryChunk::InitializationMemoryFence() {
  base::SeqCst_MemoryFence();
#ifdef THREAD_SANITIZER
  // Since TSAN does not process memory fences, we use the following store to
  // tell TSAN that there is no data race when publishing the chunk; readers
  // on other threads use synchronized_heap() above.
  base::Release_Store(reinterpret_cast<base::AtomicWord*>(&heap_),
                      reinterpret_cast<base::AtomicWord>(heap_));
#endif
}
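// write_unprotect_counter_ tracks nested requests to make this chunk's code
// area writable (it is seeded from the heap's
// code_space_memory_modification_scope_depth() in MemoryChunk::Initialize).
// Permissions only change on the 0 <-> 1 transitions below: the first
// SetReadAndWritable() flips the code area to RW, and the matching final
// SetReadAndExecutable() flips it back to RX.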
void MemoryChunk::SetReadAndExecutable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE ||
         owner()->identity() == CODE_LO_SPACE);
  // Decrementing the write_unprotect_counter_ and changing the page
  // permissions has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  if (write_unprotect_counter_ == 0) {
    // This may happen when a modification scope was already open when the
    // page was added; there is nothing to re-protect.
    return;
  }
  write_unprotect_counter_--;
  DCHECK_LT(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 0) {
    Address protect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(protect_start, page_size));
    size_t protect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(protect_start, protect_size,
                                      PageAllocator::kReadExecute));
  }
}

void MemoryChunk::SetReadAndWritable() {
  DCHECK(IsFlagSet(MemoryChunk::IS_EXECUTABLE));
  DCHECK(owner()->identity() == CODE_SPACE ||
         owner()->identity() == CODE_LO_SPACE);
  // Incrementing the write_unprotect_counter_ and changing the page
  // permissions has to be atomic.
  base::MutexGuard guard(page_protection_change_mutex_);
  write_unprotect_counter_++;
  DCHECK_LE(write_unprotect_counter_, kMaxWriteUnprotectCounter);
  if (write_unprotect_counter_ == 1) {
    Address unprotect_start =
        address() + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    size_t page_size = MemoryAllocator::GetCommitPageSize();
    DCHECK(IsAligned(unprotect_start, page_size));
    size_t unprotect_size = RoundUp(area_size(), page_size);
    CHECK(reservation_.SetPermissions(unprotect_start, unprotect_size,
                                      PageAllocator::kReadWrite));
  }
}
MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
                                     Address area_start, Address area_end,
                                     Executability executable, Space* owner,
                                     VirtualMemory reservation) {
  MemoryChunk* chunk = FromAddress(base);

  DCHECK(base == chunk->address());

  chunk->heap_ = heap;
  chunk->size_ = size;
  chunk->area_start_ = area_start;
  chunk->area_end_ = area_end;
  chunk->flags_ = Flags(NO_FLAGS);
  chunk->set_owner(owner);
  chunk->InitializeReservedMemory();
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_NEW], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->slot_set_[OLD_TO_OLD], nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_NEW],
                                       nullptr);
  base::AsAtomicPointer::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
                                       nullptr);
  chunk->invalidated_slots_ = nullptr;
  chunk->skip_list_ = nullptr;
  chunk->progress_bar_ = 0;
  chunk->high_water_mark_ = static_cast<intptr_t>(area_start - base);
  chunk->set_concurrent_sweeping_state(kSweepingDone);
  chunk->page_protection_change_mutex_ = new base::Mutex();
  chunk->write_unprotect_counter_ = 0;
  chunk->mutex_ = new base::Mutex();
  chunk->allocated_bytes_ = chunk->area_size();
  chunk->wasted_memory_ = 0;
  chunk->young_generation_bitmap_ = nullptr;
  chunk->marking_bitmap_ = nullptr;
  chunk->local_tracker_ = nullptr;

  chunk->external_backing_store_bytes_[ExternalBackingStoreType::kArrayBuffer] =
      0;
  chunk->external_backing_store_bytes_
      [ExternalBackingStoreType::kExternalString] = 0;

  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    chunk->categories_[i] = nullptr;
  }

  chunk->AllocateMarkingBitmap();
  if (owner->identity() == RO_SPACE) {
    heap->incremental_marking()
        ->non_atomic_marking_state()
        ->bitmap(chunk)
        ->MarkAllBits();
  } else {
    heap->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(chunk,
                                                                          0);
  }

  DCHECK_EQ(kFlagsOffset, OFFSET_OF(MemoryChunk, flags_));

  if (executable == EXECUTABLE) {
    chunk->SetFlag(IS_EXECUTABLE);
    if (heap->write_protect_code_memory()) {
      chunk->write_unprotect_counter_ =
          heap->code_space_memory_modification_scope_depth();
    } else {
      size_t page_size = MemoryAllocator::GetCommitPageSize();
      DCHECK(IsAligned(area_start, page_size));
      size_t area_size = RoundUp(area_end - area_start, page_size);
      CHECK(reservation.SetPermissions(area_start, area_size,
                                       PageAllocator::kReadWriteExecute));
    }
  }

  chunk->reservation_ = std::move(reservation);

  return chunk;
}
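// A fresh page goes through AllocateChunk() -> MemoryChunk::Initialize()
// above -> the owning space's InitializePage() below, which layers the
// space-specific state (free-list categories, page flags, trackers) on top of
// the raw chunk.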
Page* PagedSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  Page* page = static_cast<Page*>(chunk);
  DCHECK_EQ(MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                page->owner()->identity()),
            page->area_size());
  // Make sure that categories are initialized before freeing the area.
  page->ResetAllocatedBytes();
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateFreeListCategories();
  page->InitializeFreeListCategories();
  page->list_node().Initialize();
  page->InitializationMemoryFence();
  return page;
}

Page* SemiSpace::InitializePage(MemoryChunk* chunk, Executability executable) {
  DCHECK_EQ(executable, Executability::NOT_EXECUTABLE);
  bool in_to_space = (id() != kFromSpace);
  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
                             : MemoryChunk::IN_FROM_SPACE);
  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
                                       : MemoryChunk::IN_TO_SPACE));
  Page* page = static_cast<Page*>(chunk);
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->AllocateLocalTracker();
  page->list_node().Initialize();
#ifdef ENABLE_MINOR_MC
  if (FLAG_minor_mc) {
    page->AllocateYoungGenerationBitmap();
    heap()
        ->minor_mark_compact_collector()
        ->non_atomic_marking_state()
        ->ClearLiveness(page);
  }
#endif  // ENABLE_MINOR_MC
  page->InitializationMemoryFence();
  return page;
}

LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk,
                                 Executability executable) {
  if (executable && chunk->size() > LargePage::kMaxCodePageSize) {
    STATIC_ASSERT(LargePage::kMaxCodePageSize <= TypedSlotSet::kMaxOffset);
    FATAL("Code page is too large.");
  }

  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(chunk->area_start(), chunk->area_size());

  LargePage* page = static_cast<LargePage*>(chunk);
  page->list_node().Initialize();
  return page;
}
void Page::AllocateFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = new FreeListCategory(
        reinterpret_cast<PagedSpace*>(owner())->free_list(), this);
  }
}

void Page::InitializeFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i]->Initialize(static_cast<FreeListCategoryType>(i));
  }
}

void Page::ReleaseFreeListCategories() {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    if (categories_[i] != nullptr) {
      delete categories_[i];
      categories_[i] = nullptr;
    }
  }
}

Page* Page::ConvertNewToOld(Page* old_page) {
  DCHECK(old_page->InNewSpace());
  OldSpace* old_space = old_page->heap()->old_space();
  old_page->set_owner(old_space);
  old_page->SetFlags(0, static_cast<uintptr_t>(~0));
  Page* new_page = old_space->InitializePage(old_page, NOT_EXECUTABLE);
  old_space->AddPage(new_page);
  return new_page;
}

size_t MemoryChunk::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits() || owner()->identity() == LO_SPACE)
    return size();
  return high_water_mark_;
}

bool MemoryChunk::IsPagedSpace() const {
  return owner()->identity() != LO_SPACE;
}

bool MemoryChunk::InOldSpace() const {
  return owner()->identity() == OLD_SPACE;
}

bool MemoryChunk::InLargeObjectSpace() const {
  return owner()->identity() == LO_SPACE;
}
MemoryChunk* MemoryAllocator::AllocateChunk(size_t reserve_area_size,
                                            size_t commit_area_size,
                                            Executability executable,
                                            Space* owner) {
  DCHECK_LE(commit_area_size, reserve_area_size);

  size_t chunk_size;
  Heap* heap = isolate_->heap();
  Address base = kNullAddress;
  VirtualMemory reservation;
  Address area_start = kNullAddress;
  Address area_end = kNullAddress;
  void* address_hint =
      AlignedAddress(heap->GetRandomMmapAddr(), MemoryChunk::kAlignment);

  if (executable == EXECUTABLE) {
    chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +
                               reserve_area_size +
                               MemoryChunkLayout::CodePageGuardSize(),
                           GetCommitPageSize());

    // Size of header (not executable) plus area (executable).
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::CodePageGuardStartOffset() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);
    if (base == kNullAddress) return nullptr;
    // Update executable memory size.
    size_executable_ += reservation.size();

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(base, MemoryChunkLayout::CodePageGuardStartOffset(), kZapValue);
      ZapBlock(base + MemoryChunkLayout::ObjectStartOffsetInCodePage(),
               commit_area_size, kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInCodePage();
    area_end = area_start + commit_area_size;
  } else {
    chunk_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + reserve_area_size,
        GetCommitPageSize());
    size_t commit_size = ::RoundUp(
        MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
        GetCommitPageSize());
    base =
        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
                              executable, address_hint, &reservation);

    if (base == kNullAddress) return nullptr;

    if (Heap::ShouldZapGarbage()) {
      ZapBlock(
          base,
          MemoryChunkLayout::ObjectStartOffsetInDataPage() + commit_area_size,
          kZapValue);
    }

    area_start = base + MemoryChunkLayout::ObjectStartOffsetInDataPage();
    area_end = area_start + commit_area_size;
  }

  // Use chunk_size for statistics because reserved but not-yet-committed
  // memory regions of chunks are counted as allocated.
  isolate_->counters()->memory_allocated()->Increment(
      static_cast<int>(chunk_size));

  LOG(isolate_,
      NewEvent("MemoryChunk", reinterpret_cast<void*>(base), chunk_size));

  // We cannot use the last chunk in the address space because comparing top
  // and limit of a linear allocation area ending exactly at address 0 would
  // overflow.
  if ((base + chunk_size) == 0u) {
    CHECK(!last_chunk_.IsReserved());
    last_chunk_.TakeControl(&reservation);
    UncommitMemory(&last_chunk_);
    size_ -= chunk_size;
    if (executable == EXECUTABLE) {
      size_executable_ -= chunk_size;
    }
    CHECK(last_chunk_.IsReserved());
    return AllocateChunk(reserve_area_size, commit_area_size, executable,
                         owner);
  }

  MemoryChunk* chunk =
      MemoryChunk::Initialize(heap, base, chunk_size, area_start, area_end,
                              executable, owner, std::move(reservation));

  if (chunk->executable()) RegisterExecutableMemoryChunk(chunk);
  return chunk;
}
void MemoryChunk::SetOldGenerationPageFlags(bool is_marking) {
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

void MemoryChunk::SetYoungGenerationPageFlags(bool is_marking) {
  SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
  if (is_marking) {
    SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    SetFlag(MemoryChunk::INCREMENTAL_MARKING);
  } else {
    ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
    ClearFlag(MemoryChunk::INCREMENTAL_MARKING);
  }
}

void Page::ResetAllocatedBytes() { allocated_bytes_ = area_size(); }

void Page::AllocateLocalTracker() {
  DCHECK_NULL(local_tracker_);
  local_tracker_ = new LocalArrayBufferTracker(this);
}

bool Page::contains_array_buffers() {
  return local_tracker_ != nullptr && !local_tracker_->IsEmpty();
}

void Page::ResetFreeListStatistics() { wasted_memory_ = 0; }

size_t Page::AvailableInFreeList() {
  size_t sum = 0;
  ForAllFreeListCategories([&sum](FreeListCategory* category) {
    sum += category->available();
  });
  return sum;
}

namespace {
// Skips fillers starting at |filler| and returns the first address after the
// skipped fillers (which is expected to be |end| when the tail of a page
// consists only of filler objects).
Address SkipFillers(HeapObject* filler, Address end) {
  Address addr = filler->address();
  while (addr < end) {
    filler = HeapObject::FromAddress(addr);
    CHECK(filler->IsFiller());
    addr = filler->address() + filler->Size();
  }
  return addr;
}
}  // anonymous namespace
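// ShrinkToHighWaterMark() below releases the committed tail of a page that
// contains only filler objects above the high water mark: the tail is rounded
// down to commit-page granularity, re-covered with a filler, and handed back
// to the OS via PartialFreeMemory().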
size_t Page::ShrinkToHighWaterMark() {
  // Shrinking only makes sense for pages that own their reservation.
  VirtualMemory* reservation = reserved_memory();
  if (!reservation->IsReserved()) return 0;

  // Shrink pages to high water mark. The water mark points either to a filler
  // or the area_end.
  HeapObject* filler = HeapObject::FromAddress(HighWaterMark());
  if (filler->address() == area_end()) return 0;
  CHECK(filler->IsFiller());
  // Ensure that no objects were allocated in the [filler, area_end) region.
  DCHECK_EQ(area_end(), SkipFillers(filler, area_end()));
  // Ensure that no objects will be allocated on this page.
  DCHECK_EQ(0u, AvailableInFreeList());

  size_t unused = RoundDown(static_cast<size_t>(area_end() - filler->address()),
                            MemoryAllocator::GetCommitPageSize());
  if (unused > 0) {
    DCHECK_EQ(0u, unused % MemoryAllocator::GetCommitPageSize());
    if (FLAG_trace_gc_verbose) {
      PrintIsolate(heap()->isolate(), "Shrinking page %p: end %p -> %p\n",
                   reinterpret_cast<void*>(this),
                   reinterpret_cast<void*>(area_end()),
                   reinterpret_cast<void*>(area_end() - unused));
    }
    heap()->CreateFillerObjectAt(
        filler->address(),
        static_cast<int>(area_end() - filler->address() - unused),
        ClearRecordedSlots::kNo);
    heap()->memory_allocator()->PartialFreeMemory(
        this, address() + size() - unused, unused, area_end() - unused);
    if (filler->address() != area_end()) {
      CHECK(filler->IsFiller());
      CHECK_EQ(filler->address() + filler->Size(), area_end());
    }
  }
  return unused;
}
void Page::CreateBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_NE(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->SetRange(AddressToMarkbitIndex(start),
                                        AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, static_cast<intptr_t>(end - start));
}

void Page::DestroyBlackArea(Address start, Address end) {
  DCHECK(heap()->incremental_marking()->black_allocation());
  DCHECK_EQ(Page::FromAddress(start), this);
  DCHECK_NE(start, end);
  DCHECK_EQ(Page::FromAddress(end - 1), this);
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  marking_state->bitmap(this)->ClearRange(AddressToMarkbitIndex(start),
                                          AddressToMarkbitIndex(end));
  marking_state->IncrementLiveBytes(this, -static_cast<intptr_t>(end - start));
}

void MemoryAllocator::PartialFreeMemory(MemoryChunk* chunk, Address start_free,
                                        size_t bytes_to_free,
                                        Address new_area_end) {
  VirtualMemory* reservation = chunk->reserved_memory();
  DCHECK(reservation->IsReserved());
  chunk->size_ -= bytes_to_free;
  chunk->area_end_ = new_area_end;
  if (chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
    // Re-create the guard page at the end of the now-smaller code area.
    size_t page_size = GetCommitPageSize();
    DCHECK_EQ(0, chunk->area_end_ % static_cast<Address>(page_size));
    DCHECK_EQ(chunk->address() + chunk->size(),
              chunk->area_end() + MemoryChunkLayout::CodePageGuardSize());
    reservation->SetPermissions(chunk->area_end_, page_size,
                                PageAllocator::kNoAccess);
  }
  // On e.g. Windows, a reservation may be larger than a page and releasing
  // partially starting at |start_free| will also release the potentially
  // unused part behind the current page.
  const size_t released_bytes = reservation->Release(start_free);
  DCHECK_GE(size_, released_bytes);
  size_ -= released_bytes;
  isolate_->counters()->memory_allocated()->Decrement(
      static_cast<int>(released_bytes));
}
void MemoryAllocator::PreFreeMemory(MemoryChunk* chunk) {
  DCHECK(!chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
                                         chunk->IsEvacuationCandidate());

  VirtualMemory* reservation = chunk->reserved_memory();
  const size_t size =
      reservation->IsReserved() ? reservation->size() : chunk->size();
  DCHECK_GE(size_, static_cast<size_t>(size));
  size_ -= size;
  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
  if (chunk->executable() == EXECUTABLE) {
    DCHECK_GE(size_executable_, size);
    size_executable_ -= size;
  }

  chunk->SetFlag(MemoryChunk::PRE_FREED);

  if (chunk->executable()) UnregisterExecutableMemoryChunk(chunk);
}

void MemoryAllocator::PerformFreeMemory(MemoryChunk* chunk) {
  DCHECK(chunk->IsFlagSet(MemoryChunk::PRE_FREED));
  chunk->ReleaseAllocatedMemory();

  VirtualMemory* reservation = chunk->reserved_memory();
  if (chunk->IsFlagSet(MemoryChunk::POOLED)) {
    UncommitMemory(reservation);
  } else {
    if (reservation->IsReserved()) {
      reservation->Free();
    } else {
      // Only read-only pages can have a non-initialized reservation object.
      DCHECK_EQ(RO_SPACE, chunk->owner()->identity());
      FreeMemory(page_allocator(chunk->executable()), chunk->address(),
                 chunk->size());
    }
  }
}
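// MemoryAllocator::FreeMode variants handled below:
//   kFull            - pre-free and release the chunk immediately.
//   kAlreadyPooled   - the chunk's memory is already uncommitted; only the
//                      page-sized reservation itself is freed.
//   kPooledAndQueue  - flag the chunk POOLED, then queue it for the Unmapper.
//   kPreFreeAndQueue - pre-free now and let the Unmapper free it concurrently.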
template <MemoryAllocator::FreeMode mode>
void MemoryAllocator::Free(MemoryChunk* chunk) {
  switch (mode) {
    case kFull:
      PreFreeMemory(chunk);
      PerformFreeMemory(chunk);
      break;
    case kAlreadyPooled:
      // Pooled pages cannot be touched anymore as their memory is uncommitted.
      // Pooled pages are not-executable.
      FreeMemory(data_page_allocator(), chunk->address(),
                 static_cast<size_t>(MemoryChunk::kPageSize));
      break;
    case kPooledAndQueue:
      DCHECK_EQ(chunk->size(), static_cast<size_t>(MemoryChunk::kPageSize));
      DCHECK_EQ(chunk->executable(), NOT_EXECUTABLE);
      chunk->SetFlag(MemoryChunk::POOLED);
      V8_FALLTHROUGH;
    case kPreFreeAndQueue:
      PreFreeMemory(chunk);
      // The chunks added to this queue will be freed by a concurrent thread.
      unmapper()->AddMemoryChunkSafe(chunk);
      break;
  }
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
void MemoryAllocator::Free<MemoryAllocator::kFull>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
void MemoryAllocator::Free<MemoryAllocator::kAlreadyPooled>(MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
void MemoryAllocator::Free<MemoryAllocator::kPreFreeAndQueue>(
    MemoryChunk* chunk);

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
void MemoryAllocator::Free<MemoryAllocator::kPooledAndQueue>(
    MemoryChunk* chunk);

template <MemoryAllocator::AllocationMode alloc_mode, typename SpaceType>
Page* MemoryAllocator::AllocatePage(size_t size, SpaceType* owner,
                                    Executability executable) {
  MemoryChunk* chunk = nullptr;
  if (alloc_mode == kPooled) {
    DCHECK_EQ(size, static_cast<size_t>(
                        MemoryChunkLayout::AllocatableMemoryInMemoryChunk(
                            owner->identity())));
    DCHECK_EQ(executable, NOT_EXECUTABLE);
    chunk = AllocatePagePooled(owner);
  }
  if (chunk == nullptr) {
    chunk = AllocateChunk(size, size, executable, owner);
  }
  if (chunk == nullptr) return nullptr;
  return owner->InitializePage(chunk, executable);
}

template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, PagedSpace>(
    size_t size, PagedSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kRegular, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);
template EXPORT_TEMPLATE_DEFINE(V8_EXPORT_PRIVATE)
Page* MemoryAllocator::AllocatePage<MemoryAllocator::kPooled, SemiSpace>(
    size_t size, SemiSpace* owner, Executability executable);

LargePage* MemoryAllocator::AllocateLargePage(size_t size,
                                              LargeObjectSpace* owner,
                                              Executability executable) {
  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
  if (chunk == nullptr) return nullptr;
  return LargePage::Initialize(isolate_->heap(), chunk, executable);
}
template <typename SpaceType>
MemoryChunk* MemoryAllocator::AllocatePagePooled(SpaceType* owner) {
  MemoryChunk* chunk = unmapper()->TryGetPooledMemoryChunkSafe();
  if (chunk == nullptr) return nullptr;
  const int size = MemoryChunk::kPageSize;
  const Address start = reinterpret_cast<Address>(chunk);
  const Address area_start =
      start +
      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(owner->identity());
  const Address area_end = start + size;
  // Pooled pages are always regular data pages.
  DCHECK_NE(CODE_SPACE, owner->identity());
  VirtualMemory reservation(data_page_allocator(), start, size);
  if (!CommitMemory(&reservation)) return nullptr;
  if (Heap::ShouldZapGarbage()) {
    ZapBlock(start, size, kZapValue);
  }
  MemoryChunk::Initialize(isolate_->heap(), start, size, area_start, area_end,
                          NOT_EXECUTABLE, owner, std::move(reservation));
  size_ += size;
  return chunk;
}

void MemoryAllocator::ZapBlock(Address start, size_t size,
                               uintptr_t zap_value) {
  DCHECK_EQ(start % kPointerSize, 0);
  DCHECK_EQ(size % kPointerSize, 0);
  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
    Memory<Address>(start + s) = static_cast<Address>(zap_value);
  }
}

intptr_t MemoryAllocator::GetCommitPageSize() {
  if (FLAG_v8_os_page_size != 0) {
    DCHECK(base::bits::IsPowerOfTwo(FLAG_v8_os_page_size));
    return FLAG_v8_os_page_size * KB;
  } else {
    return CommitPageSize();
  }
}

base::AddressRegion MemoryAllocator::ComputeDiscardMemoryArea(Address addr,
                                                              size_t size) {
  size_t page_size = MemoryAllocator::GetCommitPageSize();
  if (size < page_size + FreeSpace::kSize) {
    return base::AddressRegion(0, 0);
  }
  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);
  Address discardable_end = RoundDown(addr + size, page_size);
  if (discardable_start >= discardable_end) return base::AddressRegion(0, 0);
  return base::AddressRegion(discardable_start,
                             discardable_end - discardable_start);
}
bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm, Address start,
                                             size_t commit_size,
                                             size_t reserved_size) {
  const size_t page_size = GetCommitPageSize();
  // All addresses and sizes must be aligned to the commit page size.
  DCHECK(IsAligned(start, page_size));
  DCHECK_EQ(0, commit_size % page_size);
  DCHECK_EQ(0, reserved_size % page_size);
  const size_t guard_size = MemoryChunkLayout::CodePageGuardSize();
  const size_t pre_guard_offset = MemoryChunkLayout::CodePageGuardStartOffset();
  const size_t code_area_offset =
      MemoryChunkLayout::ObjectStartOffsetInCodePage();
  // reserved_size includes two guard regions, commit_size does not.
  DCHECK_LE(commit_size, reserved_size - 2 * guard_size);
  const Address pre_guard_page = start + pre_guard_offset;
  const Address code_area = start + code_area_offset;
  const Address post_guard_page = start + reserved_size - guard_size;
  // Commit the non-executable header, from start to pre-code guard page.
  if (vm->SetPermissions(start, pre_guard_offset, PageAllocator::kReadWrite)) {
    // Create the pre-code guard page, following the header.
    if (vm->SetPermissions(pre_guard_page, page_size,
                           PageAllocator::kNoAccess)) {
      // Commit the executable code body.
      if (vm->SetPermissions(code_area, commit_size - pre_guard_offset,
                             PageAllocator::kReadWrite)) {
        // Create the post-code guard page.
        if (vm->SetPermissions(post_guard_page, page_size,
                               PageAllocator::kNoAccess)) {
          UpdateAllocatedSpaceLimits(start, code_area + commit_size);
          return true;
        }
        vm->SetPermissions(code_area, commit_size, PageAllocator::kNoAccess);
      }
    }
    vm->SetPermissions(start, pre_guard_offset, PageAllocator::kNoAccess);
  }
  return false;
}
void MemoryChunk::ReleaseAllocatedMemory() {
  if (skip_list_ != nullptr) {
    delete skip_list_;
    skip_list_ = nullptr;
  }
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
  }
  if (page_protection_change_mutex_ != nullptr) {
    delete page_protection_change_mutex_;
    page_protection_change_mutex_ = nullptr;
  }
  ReleaseSlotSet<OLD_TO_NEW>();
  ReleaseSlotSet<OLD_TO_OLD>();
  ReleaseTypedSlotSet<OLD_TO_NEW>();
  ReleaseTypedSlotSet<OLD_TO_OLD>();
  ReleaseInvalidatedSlots();
  if (local_tracker_ != nullptr) ReleaseLocalTracker();
  if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
  if (marking_bitmap_ != nullptr) ReleaseMarkingBitmap();

  if (IsPagedSpace()) {
    Page* page = static_cast<Page*>(this);
    page->ReleaseFreeListCategories();
  }
}
static SlotSet* AllocateAndInitializeSlotSet(size_t size, Address page_start) {
  size_t pages = (size + Page::kPageSize - 1) / Page::kPageSize;
  DCHECK_LT(0, pages);
  SlotSet* slot_set = new SlotSet[pages];
  for (size_t i = 0; i < pages; i++) {
    slot_set[i].SetPageStart(page_start + i * Page::kPageSize);
  }
  return slot_set;
}

template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_NEW>();
template SlotSet* MemoryChunk::AllocateSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
SlotSet* MemoryChunk::AllocateSlotSet() {
  SlotSet* slot_set = AllocateAndInitializeSlotSet(size_, address());
  SlotSet* old_slot_set = base::AsAtomicPointer::Release_CompareAndSwap(
      &slot_set_[type], nullptr, slot_set);
  if (old_slot_set != nullptr) {
    // Another thread installed a slot set concurrently; use theirs.
    delete[] slot_set;
    slot_set = old_slot_set;
  }
  DCHECK(slot_set);
  return slot_set;
}

template void MemoryChunk::ReleaseSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseSlotSet() {
  SlotSet* slot_set = slot_set_[type];
  if (slot_set) {
    slot_set_[type] = nullptr;
    delete[] slot_set;
  }
}

template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_NEW>();
template TypedSlotSet* MemoryChunk::AllocateTypedSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
TypedSlotSet* MemoryChunk::AllocateTypedSlotSet() {
  TypedSlotSet* typed_slot_set = new TypedSlotSet(address());
  TypedSlotSet* old_value = base::AsAtomicPointer::Release_CompareAndSwap(
      &typed_slot_set_[type], nullptr, typed_slot_set);
  if (old_value != nullptr) {
    delete typed_slot_set;
    typed_slot_set = old_value;
  }
  DCHECK(typed_slot_set);
  return typed_slot_set;
}

template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_NEW>();
template void MemoryChunk::ReleaseTypedSlotSet<OLD_TO_OLD>();

template <RememberedSetType type>
void MemoryChunk::ReleaseTypedSlotSet() {
  TypedSlotSet* typed_slot_set = typed_slot_set_[type];
  if (typed_slot_set) {
    typed_slot_set_[type] = nullptr;
    delete typed_slot_set;
  }
}
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
  DCHECK_NULL(invalidated_slots_);
  invalidated_slots_ = new InvalidatedSlots();
  return invalidated_slots_;
}

void MemoryChunk::ReleaseInvalidatedSlots() {
  if (invalidated_slots_) {
    delete invalidated_slots_;
    invalidated_slots_ = nullptr;
  }
}

void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
                                                     int size) {
  if (!ShouldSkipEvacuationSlotRecording()) {
    if (invalidated_slots() == nullptr) {
      AllocateInvalidatedSlots();
    }
    int old_size = (*invalidated_slots())[object];
    (*invalidated_slots())[object] = std::max(old_size, size);
  }
}

bool MemoryChunk::RegisteredObjectWithInvalidatedSlots(HeapObject* object) {
  if (ShouldSkipEvacuationSlotRecording()) {
    // Invalidated slots do not matter if we are not recording slots.
    return true;
  }
  if (invalidated_slots() == nullptr) {
    return false;
  }
  return invalidated_slots()->find(object) != invalidated_slots()->end();
}

void MemoryChunk::MoveObjectWithInvalidatedSlots(HeapObject* old_start,
                                                 HeapObject* new_start) {
  DCHECK_LT(old_start, new_start);
  DCHECK_EQ(MemoryChunk::FromHeapObject(old_start),
            MemoryChunk::FromHeapObject(new_start));
  if (!ShouldSkipEvacuationSlotRecording() && invalidated_slots()) {
    auto it = invalidated_slots()->find(old_start);
    if (it != invalidated_slots()->end()) {
      int old_size = it->second;
      int delta = static_cast<int>(new_start->address() - old_start->address());
      invalidated_slots()->erase(it);
      (*invalidated_slots())[new_start] = old_size - delta;
    }
  }
}

void MemoryChunk::ReleaseLocalTracker() {
  DCHECK_NOT_NULL(local_tracker_);
  delete local_tracker_;
  local_tracker_ = nullptr;
}
void MemoryChunk::AllocateYoungGenerationBitmap() {
  DCHECK_NULL(young_generation_bitmap_);
  young_generation_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseYoungGenerationBitmap() {
  DCHECK_NOT_NULL(young_generation_bitmap_);
  free(young_generation_bitmap_);
  young_generation_bitmap_ = nullptr;
}

void MemoryChunk::AllocateMarkingBitmap() {
  DCHECK_NULL(marking_bitmap_);
  marking_bitmap_ = static_cast<Bitmap*>(calloc(1, Bitmap::kSize));
}

void MemoryChunk::ReleaseMarkingBitmap() {
  DCHECK_NOT_NULL(marking_bitmap_);
  free(marking_bitmap_);
  marking_bitmap_ = nullptr;
}
void Space::AddAllocationObserver(AllocationObserver* observer) {
  allocation_observers_.push_back(observer);
  StartNextInlineAllocationStep();
}

void Space::RemoveAllocationObserver(AllocationObserver* observer) {
  auto it = std::find(allocation_observers_.begin(),
                      allocation_observers_.end(), observer);
  DCHECK(allocation_observers_.end() != it);
  allocation_observers_.erase(it);
  StartNextInlineAllocationStep();
}

void Space::PauseAllocationObservers() { allocation_observers_paused_ = true; }

void Space::ResumeAllocationObservers() {
  allocation_observers_paused_ = false;
}

void Space::AllocationStep(int bytes_since_last, Address soon_object,
                           int size) {
  if (!AllocationObserversActive()) {
    return;
  }

  DCHECK(!heap()->allocation_step_in_progress());
  heap()->set_allocation_step_in_progress(true);
  heap()->CreateFillerObjectAt(soon_object, size, ClearRecordedSlots::kNo);
  for (AllocationObserver* observer : allocation_observers_) {
    observer->AllocationStep(bytes_since_last, soon_object, size);
  }
  heap()->set_allocation_step_in_progress(false);
}

intptr_t Space::GetNextInlineAllocationStepSize() {
  intptr_t next_step = 0;
  for (AllocationObserver* observer : allocation_observers_) {
    next_step = next_step ? Min(next_step, observer->bytes_to_next_step())
                          : observer->bytes_to_next_step();
  }
  DCHECK(allocation_observers_.size() == 0 || next_step > 0);
  return next_step;
}
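// Allocation observers each request a number of bytes until their next
// notification. GetNextInlineAllocationStepSize() above picks the smallest
// pending step, and SpaceWithLinearArea::ComputeLimit() later shortens the
// linear allocation area to that distance so AllocationStep() fires in time.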
PagedSpace::PagedSpace(Heap* heap, AllocationSpace space,
                       Executability executable)
    : SpaceWithLinearArea(heap, space), executable_(executable) {
  area_size_ = MemoryChunkLayout::AllocatableMemoryInMemoryChunk(space);
  accounting_stats_.Clear();
}

void PagedSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(chunk);
  }
  accounting_stats_.Clear();
}

void PagedSpace::RefillFreeList() {
  // Any PagedSpace might invoke RefillFreeList. We filter all but our old
  // generation spaces out.
  if (identity() != OLD_SPACE && identity() != CODE_SPACE &&
      identity() != MAP_SPACE && identity() != RO_SPACE) {
    return;
  }
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  size_t added = 0;
  Page* p = nullptr;
  while ((p = collector->sweeper()->GetSweptPageSafe(this)) != nullptr) {
    // Only during compaction can pages actually change ownership: a
    // compaction space may pick up swept pages that still belong to the main
    // space.
    if (is_local()) {
      DCHECK_NE(this, p->owner());
      PagedSpace* owner = reinterpret_cast<PagedSpace*>(p->owner());
      base::MutexGuard guard(owner->mutex());
      owner->RefineAllocatedBytesAfterSweeping(p);
      owner->RemovePage(p);
      added += AddPage(p);
    } else {
      base::MutexGuard guard(mutex());
      DCHECK_EQ(this, p->owner());
      RefineAllocatedBytesAfterSweeping(p);
      added += RelinkFreeListCategories(p);
    }
    added += p->wasted_memory();
    if (is_local() && (added > kCompactionMemoryWanted)) break;
  }
}
void PagedSpace::MergeCompactionSpace(CompactionSpace* other) {
  base::MutexGuard guard(mutex());

  DCHECK(identity() == other->identity());
  other->FreeLinearAllocationArea();

  // The linear allocation area of {other} should be destroyed now.
  DCHECK_EQ(kNullAddress, other->top());
  DCHECK_EQ(kNullAddress, other->limit());

  // Move over pages.
  for (auto it = other->begin(); it != other->end();) {
    Page* p = *(it++);
    // Relinking requires the category to be unlinked.
    other->RemovePage(p);
    AddPage(p);
    DCHECK_EQ(p->AvailableInFreeList(),
              p->AvailableInFreeListFromAllocatedBytes());
  }
  DCHECK_EQ(0u, other->Size());
  DCHECK_EQ(0u, other->Capacity());
}

size_t PagedSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = 0;
  for (Page* page : *this) {
    size += page->CommittedPhysicalMemory();
  }
  return size;
}

bool PagedSpace::ContainsSlow(Address addr) {
  Page* p = Page::FromAddress(addr);
  for (Page* page : *this) {
    if (page == p) return true;
  }
  return false;
}

void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
  CHECK(page->SweepingDone());
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Use the sweeper-updated page counters to refine the space-level
  // allocated-bytes accounting.
  size_t old_counter = marking_state->live_bytes(page);
  size_t new_counter = page->allocated_bytes();
  DCHECK_GE(old_counter, new_counter);
  if (old_counter > new_counter) {
    DecreaseAllocatedBytes(old_counter - new_counter, page);
    // Give the heap a chance to adjust its old generation size counters.
    heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
  }
  marking_state->SetLiveBytes(page, 0);
}
Page* PagedSpace::RemovePageSafe(int size_in_bytes) {
  base::MutexGuard guard(mutex());
  // Check for pages that still have free list entries of a category at least
  // as large as the requested size.
  const int minimum_category =
      static_cast<int>(FreeList::SelectFreeListCategoryType(size_in_bytes));
  Page* page = free_list()->GetPageForCategoryType(kHuge);
  if (!page && static_cast<int>(kLarge) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kLarge);
  if (!page && static_cast<int>(kMedium) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kMedium);
  if (!page && static_cast<int>(kSmall) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kSmall);
  if (!page && static_cast<int>(kTiny) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kTiny);
  if (!page && static_cast<int>(kTiniest) >= minimum_category)
    page = free_list()->GetPageForCategoryType(kTiniest);
  if (!page) return nullptr;
  RemovePage(page);
  return page;
}

size_t PagedSpace::AddPage(Page* page) {
  CHECK(page->SweepingDone());
  page->set_owner(this);
  memory_chunk_list_.PushBack(page);
  AccountCommitted(page->size());
  IncreaseCapacity(page->area_size());
  IncreaseAllocatedBytes(page->allocated_bytes(), page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
  return RelinkFreeListCategories(page);
}

void PagedSpace::RemovePage(Page* page) {
  CHECK(page->SweepingDone());
  memory_chunk_list_.Remove(page);
  UnlinkFreeListCategories(page);
  DecreaseAllocatedBytes(page->allocated_bytes(), page);
  DecreaseCapacity(page->area_size());
  AccountUncommitted(page->size());
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

size_t PagedSpace::ShrinkPageToHighWaterMark(Page* page) {
  size_t unused = page->ShrinkToHighWaterMark();
  accounting_stats_.DecreaseCapacity(static_cast<intptr_t>(unused));
  AccountUncommitted(unused);
  return unused;
}
void PagedSpace::ResetFreeList() {
  for (Page* page : *this) {
    free_list_.EvictFreeListItems(page);
  }
  DCHECK(free_list_.IsEmpty());
}

void PagedSpace::ShrinkImmortalImmovablePages() {
  DCHECK(!heap()->deserialization_complete());
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  FreeLinearAllocationArea();
  ResetFreeList();
  for (Page* page : *this) {
    DCHECK(page->IsFlagSet(Page::NEVER_EVACUATE));
    ShrinkPageToHighWaterMark(page);
  }
}

bool PagedSpace::Expand() {
  // Always lock against the main space as we can only adjust capacity and
  // pages concurrently for the main paged space.
  base::MutexGuard guard(heap()->paged_space(identity())->mutex());

  const int size = AreaSize();

  if (!heap()->CanExpandOldGeneration(size)) return false;

  Page* page =
      heap()->memory_allocator()->AllocatePage(size, this, executable());
  if (page == nullptr) return false;
  // Pages created during bootstrapping may contain immortal immovable objects.
  if (!heap()->deserialization_complete()) page->MarkNeverEvacuate();
  AddPage(page);
  Free(page->area_start(), page->area_size(),
       SpaceAccountingMode::kSpaceAccounted);
  return true;
}

int PagedSpace::CountTotalPages() {
  int count = 0;
  for (Page* page : *this) {
    count++;
    USE(page);
  }
  return count;
}

void PagedSpace::ResetFreeListStatistics() {
  for (Page* page : *this) {
    page->ResetFreeListStatistics();
  }
}

void PagedSpace::SetLinearAllocationArea(Address top, Address limit) {
  SetTopAndLimit(top, limit);
  if (top != kNullAddress && top != limit &&
      heap()->incremental_marking()->black_allocation()) {
    Page::FromAllocationAreaAddress(top)->CreateBlackArea(top, limit);
  }
}

void PagedSpace::DecreaseLimit(Address new_limit) {
  Address old_limit = limit();
  DCHECK_LE(top(), new_limit);
  DCHECK_GE(old_limit, new_limit);
  if (new_limit != old_limit) {
    SetTopAndLimit(top(), new_limit);
    Free(new_limit, old_limit - new_limit,
         SpaceAccountingMode::kSpaceAccounted);
    if (heap()->incremental_marking()->black_allocation()) {
      Page::FromAllocationAreaAddress(new_limit)->DestroyBlackArea(new_limit,
                                                                   old_limit);
    }
  }
}
Address SpaceWithLinearArea::ComputeLimit(Address start, Address end,
                                          size_t min_size) {
  DCHECK_GE(end - start, min_size);

  if (heap()->inline_allocation_disabled()) {
    // Fit the requested area exactly.
    return start + min_size;
  } else if (SupportsInlineAllocation() && AllocationObserversActive()) {
    // Generated code may allocate inline from the linear allocation area.
    // To make sure we can observe these allocations, we use a lower limit.
    size_t step = GetNextInlineAllocationStepSize();
    size_t rounded_step;
    if (identity() == NEW_SPACE) {
      rounded_step = step - 1;
    } else {
      rounded_step = RoundSizeDownToObjectAlignment(static_cast<int>(step));
    }
    return Min(static_cast<Address>(start + min_size + rounded_step), end);
  } else {
    // The entire node can be used as the linear allocation area.
    return end;
  }
}

void PagedSpace::MarkLinearAllocationAreaBlack() {
  DCHECK(heap()->incremental_marking()->black_allocation());
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->CreateBlackArea(current_top, current_limit);
  }
}

void PagedSpace::UnmarkLinearAllocationArea() {
  Address current_top = top();
  Address current_limit = limit();
  if (current_top != kNullAddress && current_top != current_limit) {
    Page::FromAllocationAreaAddress(current_top)
        ->DestroyBlackArea(current_top, current_limit);
  }
}

void PagedSpace::FreeLinearAllocationArea() {
  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap.
  Address current_top = top();
  Address current_limit = limit();
  if (current_top == kNullAddress) {
    DCHECK_EQ(kNullAddress, current_limit);
    return;
  }

  if (heap()->incremental_marking()->black_allocation()) {
    Page* page = Page::FromAllocationAreaAddress(current_top);

    // Clear the bits in the unused black area.
    if (current_top != current_limit) {
      IncrementalMarking::MarkingState* marking_state =
          heap()->incremental_marking()->marking_state();
      marking_state->bitmap(page)->ClearRange(
          page->AddressToMarkbitIndex(current_top),
          page->AddressToMarkbitIndex(current_limit));
      marking_state->IncrementLiveBytes(
          page, -static_cast<int>(current_limit - current_top));
    }
  }

  InlineAllocationStep(current_top, kNullAddress, kNullAddress, 0);
  SetTopAndLimit(kNullAddress, kNullAddress);
  DCHECK_GE(current_limit, current_top);

  // The code page of the linear allocation area needs to be unprotected
  // because we are going to write a filler into that memory area below.
  if (identity() == CODE_SPACE) {
    heap()->UnprotectAndRegisterMemoryChunk(
        MemoryChunk::FromAddress(current_top));
  }
  Free(current_top, current_limit - current_top,
       SpaceAccountingMode::kSpaceAccounted);
}

void PagedSpace::ReleasePage(Page* page) {
  DCHECK_EQ(
      0, heap()->incremental_marking()->non_atomic_marking_state()->live_bytes(
             page));
  DCHECK_EQ(page->owner(), this);

  free_list_.EvictFreeListItems(page);
  DCHECK(!free_list_.ContainsPageFreeListItems(page));

  if (Page::FromAllocationAreaAddress(allocation_info_.top()) == page) {
    DCHECK(!top_on_previous_step_);
    allocation_info_.Reset(kNullAddress, kNullAddress);
  }

  AccountUncommitted(page->size());
  accounting_stats_.DecreaseCapacity(page->area_size());
  heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(page);
}
void PagedSpace::SetReadAndExecutable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndExecutable();
  }
}

void PagedSpace::SetReadAndWritable() {
  DCHECK(identity() == CODE_SPACE);
  for (Page* page : *this) {
    CHECK(heap()->memory_allocator()->IsMemoryChunkExecutable(page));
    page->SetReadAndWritable();
  }
}

std::unique_ptr<ObjectIterator> PagedSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new HeapObjectIterator(this));
}
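// RefillLinearAllocationAreaFromFreeList() below backs the slow allocation
// path: it retires the current linear area, takes a node from the free list,
// lets ComputeLimit() decide how much of that node becomes the new linear
// area, and returns the unused tail of the node to the free list.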
bool PagedSpace::RefillLinearAllocationAreaFromFreeList(size_t size_in_bytes) {
  DCHECK(IsAligned(size_in_bytes, kPointerSize));
  DCHECK_LE(top(), limit());
  if (top() != limit()) {
    DCHECK_EQ(Page::FromAddress(top()), Page::FromAddress(limit() - 1));
  }
  // Don't free list allocate if there is linear space available.
  DCHECK_LT(static_cast<size_t>(limit() - top()), size_in_bytes);

  // Mark the old linear allocation area with a free space map so it can be
  // skipped when scanning the heap. This also puts it back in the free list
  // if it is big enough.
  FreeLinearAllocationArea();

  if (!is_local()) {
    heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
        heap()->GCFlagsForIncrementalMarking(),
        kGCCallbackScheduleIdleGarbageCollection);
  }

  size_t new_node_size = 0;
  FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
  if (new_node == nullptr) return false;

  DCHECK_GE(new_node_size, size_in_bytes);

  // The old-space-step might have finished sweeping and restarted marking.
  // Verify that it did not turn the page of the new node into an evacuation
  // candidate.
  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));

  // Memory in the linear allocation area is counted as allocated. We may free
  // a little of this again immediately - see below.
  Page* page = Page::FromAddress(new_node->address());
  IncreaseAllocatedBytes(new_node_size, page);

  Address start = new_node->address();
  Address end = new_node->address() + new_node_size;
  Address limit = ComputeLimit(start, end, size_in_bytes);
  DCHECK_LE(limit, end);
  DCHECK_LE(size_in_bytes, limit - start);
  if (limit != end) {
    if (identity() == CODE_SPACE) {
      heap()->UnprotectAndRegisterMemoryChunk(page);
    }
    Free(limit, end - limit, SpaceAccountingMode::kSpaceAccounted);
  }
  SetLinearAllocationArea(start, limit);

  return true;
}

#ifdef DEBUG
void PagedSpace::Print() {}
#endif  // DEBUG

#ifdef VERIFY_HEAP
void PagedSpace::Verify(Isolate* isolate, ObjectVisitor* visitor) {
  bool allocation_pointer_found_in_space =
      (allocation_info_.top() == allocation_info_.limit());
  size_t external_space_bytes[kNumTypes];
  size_t external_page_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK(page->owner() == this);

    for (int i = 0; i < kNumTypes; i++) {
      external_page_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
    }

    if (page == Page::FromAllocationAreaAddress(allocation_info_.top())) {
      allocation_pointer_found_in_space = true;
    }
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    Address end_of_previous_object = page->area_start();
    Address top = page->area_end();

    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      CHECK(end_of_previous_object <= object->address());

      // The first word should be a map, and we expect all map pointers to
      // be in map space or read-only space.
      Map map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map) ||
            heap()->read_only_space()->Contains(map));

      // Perform space-specific object verification.
      VerifyObject(object);

      // The object itself should look OK.
      object->ObjectVerify(isolate);

      if (!FLAG_verify_heap_skip_remembered_set) {
        heap()->VerifyRememberedSetFor(object);
      }

      // All the interior pointers should be contained in the heap.
      int size = object->Size();
      object->IterateBody(map, size, visitor);
      CHECK(object->address() + size <= top);
      end_of_previous_object = object->address() + size;

      if (object->IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string->ExternalPayloadSize();
        external_page_bytes[ExternalBackingStoreType::kExternalString] += size;
      } else if (object->IsJSArrayBuffer()) {
        JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
        if (ArrayBufferTracker::IsTracked(array_buffer)) {
          size_t size = array_buffer->byte_length();
          external_page_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
        }
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      CHECK_EQ(external_page_bytes[t], page->ExternalBackingStoreBytes(t));
      external_space_bytes[t] += external_page_bytes[t];
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }
  CHECK(allocation_pointer_found_in_space);
#ifdef DEBUG
  VerifyCountersAfterSweeping();
#endif
}
void PagedSpace::VerifyLiveBytes() {
  IncrementalMarking::MarkingState* marking_state =
      heap()->incremental_marking()->marking_state();
  for (Page* page : *this) {
    CHECK(page->SweepingDone());
    HeapObjectIterator it(page);
    size_t black_size = 0;
    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      // All black objects contribute to the page's live byte count.
      if (marking_state->IsBlack(object)) {
        black_size += object->Size();
      }
    }
    CHECK_LE(black_size, marking_state->live_bytes(page));
  }
}
#endif  // VERIFY_HEAP

void PagedSpace::VerifyCountersAfterSweeping() {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  for (Page* page : *this) {
    DCHECK(page->SweepingDone());
    total_capacity += page->area_size();
    HeapObjectIterator it(page);
    size_t real_allocated = 0;
    for (HeapObject* object = it.Next(); object != nullptr;
         object = it.Next()) {
      if (!object->IsFiller()) {
        real_allocated += object->Size();
      }
    }
    total_allocated += page->allocated_bytes();
    DCHECK_LE(real_allocated, accounting_stats_.AllocatedOnPage(page));
    DCHECK_EQ(page->allocated_bytes(), accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
void PagedSpace::VerifyCountersBeforeConcurrentSweeping() {
  size_t total_capacity = 0;
  size_t total_allocated = 0;
  auto marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* page : *this) {
    size_t page_allocated =
        page->SweepingDone()
            ? page->allocated_bytes()
            : static_cast<size_t>(marking_state->live_bytes(page));
    total_capacity += page->area_size();
    total_allocated += page_allocated;
    DCHECK_EQ(page_allocated, accounting_stats_.AllocatedOnPage(page));
  }
  DCHECK_EQ(total_capacity, accounting_stats_.Capacity());
  DCHECK_EQ(total_allocated, accounting_stats_.Size());
}
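// NewSpace is backed by a pair of semispaces. Only to-space is committed up
// front; from-space stays uncommitted until the first flip, and allocation
// proceeds linearly within to-space's current page.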
NewSpace::NewSpace(Heap* heap, size_t initial_semispace_capacity,
                   size_t max_semispace_capacity)
    : SpaceWithLinearArea(heap, NEW_SPACE),
      to_space_(heap, kToSpace),
      from_space_(heap, kFromSpace) {
  DCHECK(initial_semispace_capacity <= max_semispace_capacity);
  DCHECK(
      base::bits::IsPowerOfTwo(static_cast<uint32_t>(max_semispace_capacity)));

  to_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  from_space_.SetUp(initial_semispace_capacity, max_semispace_capacity);
  if (!to_space_.Commit()) {
    V8::FatalProcessOutOfMemory(heap->isolate(), "New space setup");
  }
  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
  ResetLinearAllocationArea();
}

void NewSpace::TearDown() {
  allocation_info_.Reset(kNullAddress, kNullAddress);

  to_space_.TearDown();
  from_space_.TearDown();
}

void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
void NewSpace::Grow() {
  // Double the semispace size but only up to maximum capacity.
  DCHECK(TotalCapacity() < MaximumCapacity());
  size_t new_capacity =
      Min(MaximumCapacity(),
          static_cast<size_t>(FLAG_semi_space_growth_factor) * TotalCapacity());
  if (to_space_.GrowTo(new_capacity)) {
    // Only grow from-space if we managed to grow to-space.
    if (!from_space_.GrowTo(new_capacity)) {
      // If we managed to grow to-space but couldn't grow from-space,
      // attempt to shrink to-space.
      if (!to_space_.ShrinkTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::Shrink() {
  size_t new_capacity = Max(InitialTotalCapacity(), 2 * Size());
  size_t rounded_new_capacity = ::RoundUp(new_capacity, Page::kPageSize);
  if (rounded_new_capacity < TotalCapacity() &&
      to_space_.ShrinkTo(rounded_new_capacity)) {
    // Only shrink from-space if we managed to shrink to-space.
    from_space_.Reset();
    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
      // If we managed to shrink to-space but couldn't shrink from-space,
      // attempt to grow to-space again.
      if (!to_space_.GrowTo(from_space_.current_capacity())) {
        // We are in an inconsistent state because we could not
        // commit/uncommit memory from new space.
        FATAL("inconsistent state");
      }
    }
  }
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

bool NewSpace::Rebalance() {
  // Order here is important to make use of the page pool.
  return to_space_.EnsureCurrentCapacity() &&
         from_space_.EnsureCurrentCapacity();
}
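// EnsureCurrentCapacity (used via NewSpace::Rebalance above) trims or grows a
// semispace's committed page list until it matches current_capacity_ exactly:
// surplus pages go back to the memory allocator's pool, missing pages are
// allocated, cleared, and covered with a filler object.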
bool SemiSpace::EnsureCurrentCapacity() {
  if (is_committed()) {
    const int expected_pages =
        static_cast<int>(current_capacity_ / Page::kPageSize);
    MemoryChunk* current_page = first_page();
    int actual_pages = 0;

    // First iterate through the pages list until expected pages if so many
    // pages exist.
    while (current_page != nullptr && actual_pages < expected_pages) {
      actual_pages++;
      current_page = current_page->list_node().next();
    }

    // Free all overallocated pages which are behind current_page.
    while (current_page) {
      MemoryChunk* next_current = current_page->list_node().next();
      memory_chunk_list_.Remove(current_page);
      // Clear new space flags to avoid this page being treated as a new
      // space page that is potentially being swept.
      current_page->SetFlags(0, Page::kIsInNewSpaceMask);
      heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
          current_page);
      current_page = next_current;
    }

    // Add more pages if we have less than expected_pages.
    IncrementalMarking::NonAtomicMarkingState* marking_state =
        heap()->incremental_marking()->non_atomic_marking_state();
    while (actual_pages < expected_pages) {
      actual_pages++;
      current_page =
          heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
              MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
              NOT_EXECUTABLE);
      if (current_page == nullptr) return false;
      DCHECK_NOT_NULL(current_page);
      memory_chunk_list_.PushBack(current_page);
      marking_state->ClearLiveness(current_page);
      current_page->SetFlags(first_page()->GetFlags(),
                             static_cast<uintptr_t>(Page::kCopyAllFlags));
      heap()->CreateFillerObjectAt(current_page->area_start(),
                                   static_cast<int>(current_page->area_size()),
                                   ClearRecordedSlots::kNo);
    }
  }
  return true;
}
LinearAllocationArea LocalAllocationBuffer::Close() {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
    const LinearAllocationArea old_info = allocation_info_;
    allocation_info_ = LinearAllocationArea(kNullAddress, kNullAddress);
    return old_info;
  }
  return LinearAllocationArea(kNullAddress, kNullAddress);
}

LocalAllocationBuffer::LocalAllocationBuffer(
    Heap* heap, LinearAllocationArea allocation_info)
    : heap_(heap), allocation_info_(allocation_info) {
  if (IsValid()) {
    heap_->CreateFillerObjectAt(
        allocation_info_.top(),
        static_cast<int>(allocation_info_.limit() - allocation_info_.top()),
        ClearRecordedSlots::kNo);
  }
}

LocalAllocationBuffer::LocalAllocationBuffer(
    const LocalAllocationBuffer& other) {
  *this = other;
}

LocalAllocationBuffer& LocalAllocationBuffer::operator=(
    const LocalAllocationBuffer& other) {
  Close();
  heap_ = other.heap_;
  allocation_info_ = other.allocation_info_;

  // This is needed since we (a) cannot yet use move-semantics, and (b) want
  // to make the use of the class easy by it as value and (c) implicitly call
  // {Close} upon copy.
  const_cast<LocalAllocationBuffer&>(other).allocation_info_.Reset(
      kNullAddress, kNullAddress);
  return *this;
}
void NewSpace::UpdateLinearAllocationArea() {
  // Make sure there are no unaccounted allocations.
  DCHECK(!AllocationObserversActive() || top_on_previous_step_ == top());

  Address new_top = to_space_.page_low();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  allocation_info_.Reset(new_top, to_space_.page_high());
  // The order of the following two stores is important.
  original_limit_.store(limit(), std::memory_order_relaxed);
  original_top_.store(top(), std::memory_order_release);
  StartNextInlineAllocationStep();
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void NewSpace::ResetLinearAllocationArea() {
  // Do a step to account for memory allocated so far before resetting.
  InlineAllocationStep(top(), top(), kNullAddress, 0);
  to_space_.Reset();
  UpdateLinearAllocationArea();
  // Clear all mark-bits in the to-space.
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (Page* p : to_space_) {
    marking_state->ClearLiveness(p);
    // Concurrent marking may have local live bytes for this page.
    heap()->concurrent_marking()->ClearLiveness(p);
  }
}

void NewSpace::UpdateInlineAllocationLimit(size_t min_size) {
  Address new_limit = ComputeLimit(top(), to_space_.page_high(), min_size);
  allocation_info_.set_limit(new_limit);
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
}

void PagedSpace::UpdateInlineAllocationLimit(size_t min_size) {
  Address new_limit = ComputeLimit(top(), limit(), min_size);
  DCHECK_LE(new_limit, limit());
  DecreaseLimit(new_limit);
}
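// When the current to-space page cannot satisfy an allocation, AddFreshPage
// seals the remainder of the page with a filler object, advances to the next
// semispace page, and resets the linear allocation area there.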
bool NewSpace::AddFreshPage() {
  Address top = allocation_info_.top();
  DCHECK(!OldSpace::IsAtPageStart(top));

  // Do a step to account for memory allocated on the previous page.
  InlineAllocationStep(top, top, kNullAddress, 0);

  if (!to_space_.AdvancePage()) {
    // No more pages left to advance.
    return false;
  }

  // Clear remainder of current page.
  Address limit = Page::FromAllocationAreaAddress(top)->area_end();
  int remaining_in_page = static_cast<int>(limit - top);
  heap()->CreateFillerObjectAt(top, remaining_in_page, ClearRecordedSlots::kNo);
  UpdateLinearAllocationArea();

  return true;
}

bool NewSpace::AddFreshPageSynchronized() {
  base::MutexGuard guard(&mutex_);
  return AddFreshPage();
}
bool NewSpace::EnsureAllocation(int size_in_bytes,
                                AllocationAlignment alignment) {
  Address old_top = allocation_info_.top();
  Address high = to_space_.page_high();
  int filler_size = Heap::GetFillToAlign(old_top, alignment);
  int aligned_size_in_bytes = size_in_bytes + filler_size;

  if (old_top + aligned_size_in_bytes > high) {
    // Not enough room in the page, try to allocate a new one.
    if (!AddFreshPage()) {
      return false;
    }

    old_top = allocation_info_.top();
    high = to_space_.page_high();
    filler_size = Heap::GetFillToAlign(old_top, alignment);
  }

  DCHECK(old_top + aligned_size_in_bytes <= high);

  if (allocation_info_.limit() < high) {
    // Either the limit has been lowered because linear allocation was
    // disabled, or an allocation step is needed before raising it again.
    Address new_top = old_top + aligned_size_in_bytes;
    Address soon_object = old_top + filler_size;
    InlineAllocationStep(new_top, new_top, soon_object, size_in_bytes);
    UpdateInlineAllocationLimit(aligned_size_in_bytes);
  }
  return true;
}

size_t LargeObjectSpace::Available() {
  // We cannot take advantage of already allocated large object memory.
  return 0;
}
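// Allocation observers (e.g. for incremental marking or idle scavenging
// heuristics) are driven by inline allocation steps: the space remembers the
// top pointer at the previous step and reports the bytes allocated since then
// whenever the limit is hit or observers are added, removed, or paused.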
void SpaceWithLinearArea::StartNextInlineAllocationStep() {
  if (heap()->allocation_step_in_progress()) {
    // If we are mid-way through an existing step, don't start a new one.
    return;
  }

  if (AllocationObserversActive()) {
    top_on_previous_step_ = top();
    UpdateInlineAllocationLimit(0);
  } else {
    DCHECK_EQ(kNullAddress, top_on_previous_step_);
  }
}

void SpaceWithLinearArea::AddAllocationObserver(AllocationObserver* observer) {
  InlineAllocationStep(top(), top(), kNullAddress, 0);
  Space::AddAllocationObserver(observer);
  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}

void SpaceWithLinearArea::RemoveAllocationObserver(
    AllocationObserver* observer) {
  Address top_for_next_step =
      allocation_observers_.size() == 1 ? kNullAddress : top();
  InlineAllocationStep(top(), top_for_next_step, kNullAddress, 0);
  Space::RemoveAllocationObserver(observer);
  DCHECK_IMPLIES(top_on_previous_step_, AllocationObserversActive());
}

void SpaceWithLinearArea::PauseAllocationObservers() {
  // Do a step to account for memory allocated so far.
  InlineAllocationStep(top(), kNullAddress, kNullAddress, 0);
  Space::PauseAllocationObservers();
  DCHECK_EQ(kNullAddress, top_on_previous_step_);
  UpdateInlineAllocationLimit(0);
}

void SpaceWithLinearArea::ResumeAllocationObservers() {
  DCHECK_EQ(kNullAddress, top_on_previous_step_);
  Space::ResumeAllocationObservers();
  StartNextInlineAllocationStep();
}
void SpaceWithLinearArea::InlineAllocationStep(Address top,
                                               Address top_for_next_step,
                                               Address soon_object,
                                               size_t size) {
  if (heap()->allocation_step_in_progress()) {
    // Avoid starting a new step if we are mid-way through an existing one.
    return;
  }

  if (top_on_previous_step_) {
    if (top < top_on_previous_step_) {
      // Generated code decreased the top pointer to do folded allocations.
      DCHECK_NE(top, kNullAddress);
      DCHECK_EQ(Page::FromAllocationAreaAddress(top),
                Page::FromAllocationAreaAddress(top_on_previous_step_));
      top_on_previous_step_ = top;
    }
    int bytes_allocated = static_cast<int>(top - top_on_previous_step_);
    AllocationStep(bytes_allocated, soon_object, static_cast<int>(size));
    top_on_previous_step_ = top_for_next_step;
  }
}

std::unique_ptr<ObjectIterator> NewSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new SemiSpaceIterator(this));
}
void NewSpace::Verify(Isolate* isolate) {
  // The allocation pointer should be in the space or at the very end.
  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

  // There should be objects packed in from the low address up to the
  // allocation pointer.
  Address current = to_space_.first_page()->area_start();
  CHECK_EQ(current, to_space_.space_start());

  size_t external_space_bytes[kNumTypes];
  for (int i = 0; i < kNumTypes; i++) {
    external_space_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  while (current != top()) {
    if (!Page::IsAlignedToPageSize(current)) {
      // The allocation pointer should not be in the middle of an object.
      CHECK(!Page::FromAllocationAreaAddress(current)->ContainsLimit(top()) ||
            current < top());

      HeapObject* object = HeapObject::FromAddress(current);

      // The first word should be a map, and we expect all map pointers to
      // be in map space or read-only space.
      Map map = object->map();
      CHECK(map->IsMap());
      CHECK(heap()->map_space()->Contains(map) ||
            heap()->read_only_space()->Contains(map));

      // The object should not be code or a map.
      CHECK(!object->IsMap());
      CHECK(!object->IsAbstractCode());

      // The object itself should look OK.
      object->ObjectVerify(isolate);

      // All the interior pointers should be contained in the heap.
      VerifyPointersVisitor visitor(heap());
      int size = object->Size();
      object->IterateBody(map, size, &visitor);

      if (object->IsExternalString()) {
        ExternalString external_string = ExternalString::cast(object);
        size_t size = external_string->ExternalPayloadSize();
        external_space_bytes[ExternalBackingStoreType::kExternalString] += size;
      } else if (object->IsJSArrayBuffer()) {
        JSArrayBuffer* array_buffer = JSArrayBuffer::cast(object);
        if (ArrayBufferTracker::IsTracked(array_buffer)) {
          size_t size = array_buffer->byte_length();
          external_space_bytes[ExternalBackingStoreType::kArrayBuffer] += size;
        }
      }

      current += size;
    } else {
      // At end of page, switch to next page.
      Page* page = Page::FromAllocationAreaAddress(current)->next_page();
      current = page->area_start();
    }
  }

  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_space_bytes[t], ExternalBackingStoreBytes(t));
  }

  // Check semi-spaces.
  CHECK_EQ(from_space_.id(), kFromSpace);
  CHECK_EQ(to_space_.id(), kToSpace);
  from_space_.Verify();
  to_space_.Verify();
}
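// A SemiSpace tracks three page-aligned capacities: minimum_capacity_ (the
// initial size), current_capacity_ (what is committed right now), and
// maximum_capacity_ (the growth ceiling). Commit/Uncommit and GrowTo/ShrinkTo
// below always operate in whole pages taken from the pooled page allocator.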
void SemiSpace::SetUp(size_t initial_capacity, size_t maximum_capacity) {
  DCHECK_GE(maximum_capacity, static_cast<size_t>(Page::kPageSize));
  minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
  current_capacity_ = minimum_capacity_;
  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
  committed_ = false;
}

void SemiSpace::TearDown() {
  // Properly uncommit memory to keep the allocator counters in sync.
  if (is_committed()) {
    Uncommit();
  }
  current_capacity_ = maximum_capacity_ = 0;
}

bool SemiSpace::Commit() {
  DCHECK(!is_committed());
  const int num_pages = static_cast<int>(current_capacity_ / Page::kPageSize);
  for (int pages_added = 0; pages_added < num_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
  }
  Reset();
  AccountCommitted(current_capacity_);
  if (age_mark_ == kNullAddress) {
    age_mark_ = first_page()->area_start();
  }
  committed_ = true;
  return true;
}

bool SemiSpace::Uncommit() {
  DCHECK(is_committed());
  while (!memory_chunk_list_.Empty()) {
    MemoryChunk* chunk = memory_chunk_list_.front();
    memory_chunk_list_.Remove(chunk);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(chunk);
  }
  current_page_ = nullptr;
  AccountUncommitted(current_capacity_);
  committed_ = false;
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  return true;
}

size_t SemiSpace::CommittedPhysicalMemory() {
  if (!is_committed()) return 0;
  size_t size = 0;
  for (Page* p : *this) {
    size += p->CommittedPhysicalMemory();
  }
  return size;
}
bool SemiSpace::GrowTo(size_t new_capacity) {
  if (!is_committed()) {
    if (!Commit()) return false;
  }
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_LE(new_capacity, maximum_capacity_);
  DCHECK_GT(new_capacity, current_capacity_);
  const size_t delta = new_capacity - current_capacity_;
  DCHECK(IsAligned(delta, AllocatePageSize()));
  const int delta_pages = static_cast<int>(delta / Page::kPageSize);
  DCHECK(last_page());
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  for (int pages_added = 0; pages_added < delta_pages; pages_added++) {
    Page* new_page =
        heap()->memory_allocator()->AllocatePage<MemoryAllocator::kPooled>(
            MemoryChunkLayout::AllocatableMemoryInDataPage(), this,
            NOT_EXECUTABLE);
    if (new_page == nullptr) {
      if (pages_added) RewindPages(pages_added);
      return false;
    }
    memory_chunk_list_.PushBack(new_page);
    marking_state->ClearLiveness(new_page);
    // Duplicate the flags that were set on the old page.
    new_page->SetFlags(last_page()->GetFlags(), Page::kCopyOnFlipFlagsMask);
  }
  AccountCommitted(delta);
  current_capacity_ = new_capacity;
  return true;
}

void SemiSpace::RewindPages(int num_pages) {
  DCHECK_GT(num_pages, 0);
  DCHECK(last_page());
  while (num_pages > 0) {
    MemoryChunk* last = last_page();
    memory_chunk_list_.Remove(last);
    heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(last);
    num_pages--;
  }
}

bool SemiSpace::ShrinkTo(size_t new_capacity) {
  DCHECK_EQ(new_capacity & kPageAlignmentMask, 0u);
  DCHECK_GE(new_capacity, minimum_capacity_);
  DCHECK_LT(new_capacity, current_capacity_);
  if (is_committed()) {
    const size_t delta = current_capacity_ - new_capacity;
    DCHECK(IsAligned(delta, Page::kPageSize));
    int delta_pages = static_cast<int>(delta / Page::kPageSize);
    RewindPages(delta_pages);
    AccountUncommitted(delta);
    heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
  }
  current_capacity_ = new_capacity;
  return true;
}
void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
  for (Page* page : *this) {
    page->set_owner(this);
    page->SetFlags(flags, mask);
    if (id_ == kToSpace) {
      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
      page->SetFlag(MemoryChunk::IN_TO_SPACE);
      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
      heap()->incremental_marking()->non_atomic_marking_state()->SetLiveBytes(
          page, 0);
    } else {
      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
    }
    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
  }
}

void SemiSpace::Reset() {
  DCHECK(first_page());
  DCHECK(last_page());
  current_page_ = first_page();
}
void SemiSpace::RemovePage(Page* page) {
  if (current_page_ == page) {
    if (page->prev_page()) {
      current_page_ = page->prev_page();
    }
  }
  memory_chunk_list_.Remove(page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    DecrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}

void SemiSpace::PrependPage(Page* page) {
  page->SetFlags(current_page()->GetFlags(),
                 static_cast<uintptr_t>(Page::kCopyAllFlags));
  page->set_owner(this);
  memory_chunk_list_.PushFront(page);
  for (size_t i = 0; i < ExternalBackingStoreType::kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    IncrementExternalBackingStoreBytes(t, page->ExternalBackingStoreBytes(t));
  }
}
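// Swap exchanges everything except the semispace ids, then FixPagesFlags
// rewrites the per-page IN_TO_SPACE/IN_FROM_SPACE flags (and clears live
// bytes on the new to-space) so the pages match their new roles.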
void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
  // We won't be swapping semispaces without data in them.
  DCHECK(from->first_page());
  DCHECK(to->first_page());

  intptr_t saved_to_space_flags = to->current_page()->GetFlags();

  // We swap all properties but id_.
  std::swap(from->current_capacity_, to->current_capacity_);
  std::swap(from->maximum_capacity_, to->maximum_capacity_);
  std::swap(from->minimum_capacity_, to->minimum_capacity_);
  std::swap(from->age_mark_, to->age_mark_);
  std::swap(from->committed_, to->committed_);
  std::swap(from->memory_chunk_list_, to->memory_chunk_list_);
  std::swap(from->current_page_, to->current_page_);
  std::swap(from->external_backing_store_bytes_,
            to->external_backing_store_bytes_);

  to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
  from->FixPagesFlags(0, 0);
}

void SemiSpace::set_age_mark(Address mark) {
  DCHECK_EQ(Page::FromAllocationAreaAddress(mark)->owner(), this);
  age_mark_ = mark;
  // Mark all pages up to the one containing mark.
  for (Page* p : PageRange(space_start(), mark)) {
    p->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
  }
}

std::unique_ptr<ObjectIterator> SemiSpace::GetObjectIterator() {
  // Use NewSpace::GetObjectIterator to iterate the to-space.
  UNREACHABLE();
}

void SemiSpace::Print() {}
void SemiSpace::Verify() {
  bool is_from_space = (id_ == kFromSpace);
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (Page* page : *this) {
    CHECK_EQ(page->owner(), this);
    CHECK(page->InNewSpace());
    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
                                        : MemoryChunk::IN_TO_SPACE));
    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
                                         : MemoryChunk::IN_FROM_SPACE));
    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
    if (!is_from_space) {
      // The pointers-from-here-are-interesting flag isn't updated dynamically
      // on from-space pages, so it might be out of sync with the marking
      // state.
      if (page->heap()->incremental_marking()->IsMarking()) {
        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      } else {
        CHECK(
            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
      }
    }
    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += page->ExternalBackingStoreBytes(t);
    }

    CHECK_IMPLIES(page->list_node().prev(),
                  page->list_node().prev()->list_node().next() == page);
  }

  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}

void SemiSpace::AssertValidRange(Address start, Address end) {
  // Addresses belong to the same semi-space.
  Page* page = Page::FromAllocationAreaAddress(start);
  Page* end_page = Page::FromAllocationAreaAddress(end);
  SemiSpace* space = reinterpret_cast<SemiSpace*>(page->owner());
  DCHECK_EQ(space, end_page->owner());
  // Start address is before end address, either on the same page, or the end
  // address is on a later page in the linked list of semi-space pages.
  if (page == end_page) {
    DCHECK_LE(start, end);
  } else {
    while (page != end_page) {
      page = page->next_page();
    }
  }
}
SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
  Initialize(space->first_allocatable_address(), space->top());
}

void SemiSpaceIterator::Initialize(Address start, Address end) {
  SemiSpace::AssertValidRange(start, end);
  current_ = start;
  limit_ = end;
}

size_t NewSpace::CommittedPhysicalMemory() {
  if (!base::OS::HasLazyCommits()) return CommittedMemory();
  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
  size_t size = to_space_.CommittedPhysicalMemory();
  if (from_space_.is_committed()) {
    size += from_space_.CommittedPhysicalMemory();
  }
  return size;
}

void FreeListCategory::Reset() {
  set_top(nullptr);
  set_prev(nullptr);
  set_next(nullptr);
  available_ = 0;
}
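// Free-list nodes are FreeSpace objects threaded through the freed memory
// itself. Each page owns one FreeListCategory per size class; categories are
// linked into the owning FreeList on demand and unlinked when they run empty.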
FreeSpace* FreeListCategory::PickNodeFromList(size_t minimum_size,
                                              size_t* node_size) {
  DCHECK(page()->CanAllocate());
  FreeSpace* node = top();
  if (node == nullptr || static_cast<size_t>(node->Size()) < minimum_size) {
    *node_size = 0;
    return nullptr;
  }
  set_top(node->next());
  *node_size = node->Size();
  available_ -= *node_size;
  return node;
}

FreeSpace* FreeListCategory::SearchForNodeInList(size_t minimum_size,
                                                 size_t* node_size) {
  DCHECK(page()->CanAllocate());
  FreeSpace* prev_non_evac_node = nullptr;
  for (FreeSpace* cur_node = top(); cur_node != nullptr;
       cur_node = cur_node->next()) {
    size_t size = cur_node->size();
    if (size >= minimum_size) {
      DCHECK_GE(available_, size);
      available_ -= size;
      if (cur_node == top()) {
        set_top(cur_node->next());
      }
      if (prev_non_evac_node != nullptr) {
        MemoryChunk* chunk =
            MemoryChunk::FromAddress(prev_non_evac_node->address());
        if (chunk->owner()->identity() == CODE_SPACE) {
          chunk->heap()->UnprotectAndRegisterMemoryChunk(chunk);
        }
        prev_non_evac_node->set_next(cur_node->next());
      }
      *node_size = size;
      return cur_node;
    }

    prev_non_evac_node = cur_node;
  }
  return nullptr;
}
void FreeListCategory::Free(Address start, size_t size_in_bytes,
                            FreeMode mode) {
  DCHECK(page()->CanAllocate());
  FreeSpace* free_space = FreeSpace::cast(HeapObject::FromAddress(start));
  free_space->set_next(top());
  set_top(free_space);
  available_ += size_in_bytes;
  if ((mode == kLinkCategory) && (prev() == nullptr) && (next() == nullptr)) {
    owner()->AddCategory(this);
  }
}

void FreeListCategory::RepairFreeList(Heap* heap) {
  FreeSpace* n = top();
  while (n != nullptr) {
    ObjectSlot map_location(n->address());
    if (*map_location == nullptr) {
      map_location.store(ReadOnlyRoots(heap).free_space_map());
    } else {
      DCHECK(*map_location == ReadOnlyRoots(heap).free_space_map());
    }
    n = n->next();
  }
}

void FreeListCategory::Relink() {
  DCHECK(!is_linked());
  owner()->AddCategory(this);
}
FreeList::FreeList() : wasted_bytes_(0) {
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  Reset();
}

void FreeList::Reset() {
  ForAllFreeListCategories(
      [](FreeListCategory* category) { category->Reset(); });
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    categories_[i] = nullptr;
  }
  ResetStats();
}

size_t FreeList::Free(Address start, size_t size_in_bytes, FreeMode mode) {
  Page* page = Page::FromAddress(start);
  page->DecreaseAllocatedBytes(size_in_bytes);

  // Blocks have to be a minimum size to hold free list items.
  if (size_in_bytes < kMinBlockSize) {
    page->add_wasted_memory(size_in_bytes);
    wasted_bytes_ += size_in_bytes;
    return size_in_bytes;
  }

  // Insert other blocks at the head of a free list of the appropriate
  // magnitude.
  FreeListCategoryType type = SelectFreeListCategoryType(size_in_bytes);
  page->free_list_category(type)->Free(start, size_in_bytes, mode);
  DCHECK_EQ(page->AvailableInFreeList(),
            page->AvailableInFreeListFromAllocatedBytes());
  return 0;
}
FreeSpace* FreeList::FindNodeIn(FreeListCategoryType type, size_t minimum_size,
                                size_t* node_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace* node = nullptr;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->PickNodeFromList(minimum_size, node_size);
    if (node != nullptr) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    RemoveCategory(current);
  }
  return node;
}

FreeSpace* FreeList::TryFindNodeIn(FreeListCategoryType type,
                                   size_t minimum_size, size_t* node_size) {
  if (categories_[type] == nullptr) return nullptr;
  FreeSpace* node =
      categories_[type]->PickNodeFromList(minimum_size, node_size);
  if (node != nullptr) {
    DCHECK(IsVeryLong() || Available() == SumFreeLists());
  }
  return node;
}

FreeSpace* FreeList::SearchForNodeInList(FreeListCategoryType type,
                                         size_t* node_size,
                                         size_t minimum_size) {
  FreeListCategoryIterator it(this, type);
  FreeSpace* node = nullptr;
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    node = current->SearchForNodeInList(minimum_size, node_size);
    if (node != nullptr) {
      DCHECK(IsVeryLong() || Available() == SumFreeLists());
      return node;
    }
    if (current->is_empty()) {
      RemoveCategory(current);
    }
  }
  return node;
}
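// Allocation strategy: scan the categories from the fast-path category up to
// (but not including) kHuge, fall back to a linear search of the huge list,
// and finally retry a best-fit pick in the originally selected category. A
// typical caller pairs this with the accounting seen earlier in the file, e.g.
//   size_t new_node_size = 0;
//   FreeSpace* new_node = free_list_.Allocate(size_in_bytes, &new_node_size);
//   if (new_node == nullptr) return false;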
FreeSpace* FreeList::Allocate(size_t size_in_bytes, size_t* node_size) {
  DCHECK_GE(kMaxBlockSize, size_in_bytes);
  FreeSpace* node = nullptr;
  // First try the allocation fast path: try to allocate the minimum element
  // size of a free list category. This operation is constant time.
  FreeListCategoryType type =
      SelectFastAllocationFreeListCategoryType(size_in_bytes);
  for (int i = type; i < kHuge && node == nullptr; i++) {
    node = FindNodeIn(static_cast<FreeListCategoryType>(i), size_in_bytes,
                      node_size);
  }

  if (node == nullptr) {
    // Next search the huge list for free list nodes. This takes linear time
    // in the number of huge elements.
    node = SearchForNodeInList(kHuge, node_size, size_in_bytes);
  }

  if (node == nullptr && type != kHuge) {
    // We didn't find anything in the huge list. Now search the best fitting
    // free list for a node that has at least the requested size.
    type = SelectFreeListCategoryType(size_in_bytes);
    node = TryFindNodeIn(type, size_in_bytes, node_size);
  }

  if (node != nullptr) {
    Page::FromAddress(node->address())->IncreaseAllocatedBytes(*node_size);
  }

  DCHECK(IsVeryLong() || Available() == SumFreeLists());
  return node;
}
size_t FreeList::EvictFreeListItems(Page* page) {
  size_t sum = 0;
  page->ForAllFreeListCategories([this, &sum](FreeListCategory* category) {
    DCHECK_EQ(this, category->owner());
    sum += category->available();
    RemoveCategory(category);
    category->Reset();
  });
  return sum;
}

bool FreeList::ContainsPageFreeListItems(Page* page) {
  bool contained = false;
  page->ForAllFreeListCategories(
      [this, &contained](FreeListCategory* category) {
        if (category->owner() == this && category->is_linked()) {
          contained = true;
        }
      });
  return contained;
}

void FreeList::RepairLists(Heap* heap) {
  ForAllFreeListCategories(
      [heap](FreeListCategory* category) { category->RepairFreeList(heap); });
}
bool FreeList::AddCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  DCHECK_LT(type, kNumberOfCategories);
  FreeListCategory* top = categories_[type];

  if (category->is_empty()) return false;
  if (top == category) return false;

  // Common double-linked list insertion.
  if (top != nullptr) {
    top->set_prev(category);
  }
  category->set_next(top);
  categories_[type] = category;
  return true;
}

void FreeList::RemoveCategory(FreeListCategory* category) {
  FreeListCategoryType type = category->type_;
  DCHECK_LT(type, kNumberOfCategories);
  FreeListCategory* top = categories_[type];

  // Common double-linked list removal.
  if (top == category) {
    categories_[type] = category->next();
  }
  if (category->prev() != nullptr) {
    category->prev()->set_next(category->next());
  }
  if (category->next() != nullptr) {
    category->next()->set_prev(category->prev());
  }
  category->set_next(nullptr);
  category->set_prev(nullptr);
}

void FreeList::PrintCategories(FreeListCategoryType type) {
  FreeListCategoryIterator it(this, type);
  PrintF("FreeList[%p, top=%p, %d] ", static_cast<void*>(this),
         static_cast<void*>(categories_[type]), type);
  while (it.HasNext()) {
    FreeListCategory* current = it.Next();
    PrintF("%p -> ", static_cast<void*>(current));
  }
  PrintF("null\n");
}
size_t FreeListCategory::SumFreeList() {
  size_t sum = 0;
  FreeSpace* cur = top();
  while (cur != nullptr) {
    DCHECK_EQ(cur->map(),
              page()->heap()->isolate()->root(RootIndex::kFreeSpaceMap));
    sum += cur->relaxed_read_size();
    cur = cur->next();
  }
  return sum;
}

int FreeListCategory::FreeListLength() {
  int length = 0;
  FreeSpace* cur = top();
  while (cur != nullptr) {
    length++;
    cur = cur->next();
    if (length == kVeryLongFreeList) return length;
  }
  return length;
}

bool FreeList::IsVeryLong() {
  int len = 0;
  for (int i = kFirstCategory; i < kNumberOfCategories; i++) {
    FreeListCategoryIterator it(this, static_cast<FreeListCategoryType>(i));
    while (it.HasNext()) {
      len += it.Next()->FreeListLength();
      if (len >= FreeListCategory::kVeryLongFreeList) return true;
    }
  }
  return false;
}

// This can take a very long time because it is linear in the number of
// entries on the free list, so it should not be called if FreeListLength
// returns kVeryLongFreeList.
size_t FreeList::SumFreeLists() {
  size_t sum = 0;
  ForAllFreeListCategories(
      [&sum](FreeListCategory* category) { sum += category->SumFreeList(); });
  return sum;
}
void PagedSpace::PrepareForMarkCompact() {
  // We don't have a linear allocation area while sweeping. It will be
  // restored on the first allocation after the sweep.
  FreeLinearAllocationArea();

  // Clear the free list before a full GC---it will be rebuilt afterward.
  free_list_.Reset();
}

size_t PagedSpace::SizeOfObjects() {
  CHECK_GE(limit(), top());
  DCHECK_GE(Size(), static_cast<size_t>(limit() - top()));
  return Size() - (limit() - top());
}

bool PagedSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (collector->sweeping_in_progress()) {
    // Wait for the sweeper threads here and complete the sweeping phase.
    collector->EnsureSweepingCompleted();

    // After waiting for the sweeper threads, there may be new free-list
    // entries.
    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
  }
  return false;
}

bool CompactionSpace::SweepAndRetryAllocation(int size_in_bytes) {
  MarkCompactCollector* collector = heap()->mark_compact_collector();
  if (FLAG_concurrent_sweeping && collector->sweeping_in_progress()) {
    collector->sweeper()->ParallelSweepSpace(identity(), 0);
    RefillFreeList();
    return RefillLinearAllocationAreaFromFreeList(size_in_bytes);
  }
  return false;
}

bool PagedSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
  VMState<GC> state(heap()->isolate());
  RuntimeCallTimerScope runtime_timer(
      heap()->isolate(), RuntimeCallCounterId::kGC_Custom_SlowAllocateRaw);
  return RawSlowRefillLinearAllocationArea(size_in_bytes);
}

bool CompactionSpace::SlowRefillLinearAllocationArea(int size_in_bytes) {
  return RawSlowRefillLinearAllocationArea(size_in_bytes);
}
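// The slow allocation path below tries progressively more expensive options:
// refill from the free list, help or wait for concurrent sweeping and retry,
// steal a page from the main space when running as a compaction space, expand
// the space, and only then fall back to completing the sweep entirely.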
bool PagedSpace::RawSlowRefillLinearAllocationArea(int size_in_bytes) {
  // Allocation in this space has failed.
  DCHECK_GE(size_in_bytes, 0);
  const int kMaxPagesToSweep = 1;

  if (RefillLinearAllocationAreaFromFreeList(size_in_bytes)) return true;

  MarkCompactCollector* collector = heap()->mark_compact_collector();
  // Sweeping is still in progress.
  if (collector->sweeping_in_progress()) {
    if (FLAG_concurrent_sweeping && !is_local() &&
        !collector->sweeper()->AreSweeperTasksRunning()) {
      collector->EnsureSweepingCompleted();
    }

    // First try to refill the free list; concurrent sweeper threads may have
    // freed some objects in the meantime.
    RefillFreeList();

    // Retry the free list allocation.
    if (RefillLinearAllocationAreaFromFreeList(
            static_cast<size_t>(size_in_bytes)))
      return true;

    // If sweeping is still in progress, try to sweep pages.
    int max_freed = collector->sweeper()->ParallelSweepSpace(
        identity(), size_in_bytes, kMaxPagesToSweep);
    RefillFreeList();
    if (max_freed >= size_in_bytes) {
      if (RefillLinearAllocationAreaFromFreeList(
              static_cast<size_t>(size_in_bytes)))
        return true;
    }
  } else if (is_local()) {
    // Sweeping not in progress and we are on a {CompactionSpace}. This can
    // only happen when we are evacuating for the young generation.
    PagedSpace* main_space = heap()->paged_space(identity());
    Page* page = main_space->RemovePageSafe(size_in_bytes);
    if (page != nullptr) {
      AddPage(page);
      if (RefillLinearAllocationAreaFromFreeList(
              static_cast<size_t>(size_in_bytes)))
        return true;
    }
  }

  if (heap()->ShouldExpandOldGenerationOnSlowAllocation() && Expand()) {
    DCHECK((CountTotalPages() > 1) ||
           (static_cast<size_t>(size_in_bytes) <= free_list_.Available()));
    return RefillLinearAllocationAreaFromFreeList(
        static_cast<size_t>(size_in_bytes));
  }

  // If sweeper threads are active, wait for them at this point and steal
  // elements from their free lists. Allocation may still fail here, which
  // would indicate that there is not enough memory for the given allocation.
  return SweepAndRetryAllocation(size_in_bytes);
}

void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
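// ReadOnlySpace is a paged space whose pages are flipped between read-only
// and read-write via SetPermissionsForPages; once sealed, page headers are
// made relocatable so the pages no longer depend on their reservation state.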
ReadOnlySpace::ReadOnlySpace(Heap* heap)
    : PagedSpace(heap, RO_SPACE, NOT_EXECUTABLE),
      is_string_padding_cleared_(heap->isolate()->initialized_from_snapshot()) {
}

void ReadOnlyPage::MakeHeaderRelocatable() {
  if (mutex_ != nullptr) {
    delete mutex_;
    mutex_ = nullptr;
    local_tracker_ = nullptr;
    reservation_.Reset();
  }
}

void ReadOnlySpace::SetPermissionsForPages(PageAllocator::Permission access) {
  const size_t page_size = MemoryAllocator::GetCommitPageSize();
  const size_t area_start_offset =
      RoundUp(MemoryChunkLayout::ObjectStartOffsetInDataPage(), page_size);
  MemoryAllocator* memory_allocator = heap()->memory_allocator();
  for (Page* p : *this) {
    ReadOnlyPage* page = static_cast<ReadOnlyPage*>(p);
    if (access == PageAllocator::kRead) {
      page->MakeHeaderRelocatable();
    }

    // Read-only pages don't have a valid reservation object, so we get the
    // proper page allocator manually.
    v8::PageAllocator* page_allocator =
        memory_allocator->page_allocator(page->executable());
    CHECK(SetPermissions(page_allocator, page->address() + area_start_offset,
                         page->size() - area_start_offset, access));
  }
}
void ReadOnlySpace::RepairFreeListsAfterDeserialization() {
  free_list_.RepairLists(heap());
  // Each page may have a small free space that is not tracked by a free list.
  // Those free spaces still contain null as their map pointer.
  // Overwrite them with new fillers.
  for (Page* page : *this) {
    int size = static_cast<int>(page->wasted_memory());
    if (size == 0) {
      // If there is no wasted memory then all free space is in the free list.
      continue;
    }
    Address start = page->HighWaterMark();
    Address end = page->area_end();
    if (start < end - size) {
      // A region at the high watermark is already in the free list.
      HeapObject* filler = HeapObject::FromAddress(start);
      CHECK(filler->IsFiller());
      start += filler->Size();
    }
    CHECK_EQ(size, static_cast<int>(end - start));
    heap()->CreateFillerObjectAt(start, size, ClearRecordedSlots::kNo);
  }
}

void ReadOnlySpace::ClearStringPaddingIfNeeded() {
  if (is_string_padding_cleared_) return;

  WritableScope writable_scope(this);
  for (Page* page : *this) {
    HeapObjectIterator iterator(page);
    for (HeapObject* o = iterator.Next(); o != nullptr; o = iterator.Next()) {
      if (o->IsSeqOneByteString()) {
        SeqOneByteString::cast(o)->clear_padding();
      } else if (o->IsSeqTwoByteString()) {
        SeqTwoByteString::cast(o)->clear_padding();
      }
    }
  }
  is_string_padding_cleared_ = true;
}

void ReadOnlySpace::MarkAsReadOnly() {
  DCHECK(!is_marked_read_only_);
  FreeLinearAllocationArea();
  is_marked_read_only_ = true;
  SetPermissionsForPages(PageAllocator::kRead);
}

void ReadOnlySpace::MarkAsReadWrite() {
  DCHECK(is_marked_read_only_);
  SetPermissionsForPages(PageAllocator::kReadWrite);
  is_marked_read_only_ = false;
}
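// Non-executable large pages can be shrunk after marking: GetAddressToShrink
// returns the first committed-page-aligned address past the live object, and
// the caller releases the tail and drops any recorded slots in that range.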
Address LargePage::GetAddressToShrink(Address object_address,
                                      size_t object_size) {
  if (executable() == EXECUTABLE) {
    return 0;
  }
  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                               MemoryAllocator::GetCommitPageSize());
  if (used_size < CommittedPhysicalMemory()) {
    return address() + used_size;
  }
  return 0;
}

void LargePage::ClearOutOfLiveRangeSlots(Address free_start) {
  RememberedSet<OLD_TO_NEW>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(this, free_start, area_end(),
                                         SlotSet::FREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(this, free_start, area_end());
  RememberedSet<OLD_TO_OLD>::RemoveRangeTyped(this, free_start, area_end());
}

LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
  current_ = space->first_page();
}

HeapObject* LargeObjectIterator::Next() {
  if (current_ == nullptr) return nullptr;

  HeapObject* object = current_->GetObject();
  current_ = current_->next_page();
  return object;
}
LargeObjectSpace::LargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, LO_SPACE) {}

LargeObjectSpace::LargeObjectSpace(Heap* heap, AllocationSpace id)
    : Space(heap, id),
      size_(0),
      page_count_(0),
      objects_size_(0),
      chunk_map_(1024) {}

void LargeObjectSpace::TearDown() {
  while (!memory_chunk_list_.Empty()) {
    LargePage* page = first_page();
    LOG(heap()->isolate(),
        DeleteEvent("LargeObjectChunk",
                    reinterpret_cast<void*>(page->address())));
    memory_chunk_list_.Remove(page);
    heap()->memory_allocator()->Free<MemoryAllocator::kFull>(page);
  }
}

AllocationResult LargeObjectSpace::AllocateRaw(int object_size) {
  return AllocateRaw(object_size, NOT_EXECUTABLE);
}

AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
                                               Executability executable) {
  // Check if we want to force a GC before growing the old space further.
  // If so, fail the allocation.
  if (!heap()->CanExpandOldGeneration(object_size) ||
      !heap()->ShouldExpandOldGenerationOnSlowAllocation()) {
    return AllocationResult::Retry(identity());
  }

  LargePage* page = AllocateLargePage(object_size, executable);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  HeapObject* object = page->GetObject();
  heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
      heap()->GCFlagsForIncrementalMarking(),
      kGCCallbackScheduleIdleGarbageCollection);
  if (heap()->incremental_marking()->black_allocation()) {
    heap()->incremental_marking()->marking_state()->WhiteToBlack(object);
  }
  DCHECK_IMPLIES(
      heap()->incremental_marking()->black_allocation(),
      heap()->incremental_marking()->marking_state()->IsBlack(object));
  page->InitializationMemoryFence();
  return object;
}
LargePage* LargeObjectSpace::AllocateLargePage(int object_size,
                                               Executability executable) {
  LargePage* page = heap()->memory_allocator()->AllocateLargePage(
      object_size, this, executable);
  if (page == nullptr) return nullptr;
  DCHECK_GE(page->area_size(), static_cast<size_t>(object_size));

  Register(page, object_size);

  HeapObject* object = page->GetObject();

  heap()->CreateFillerObjectAt(object->address(), object_size,
                               ClearRecordedSlots::kNo);
  AllocationStep(object_size, object->address(), object_size);
  return page;
}

size_t LargeObjectSpace::CommittedPhysicalMemory() {
  // On a platform that provides lazy committing of memory, we over-account
  // the actually committed memory. There is no easy way right now to support
  // precise accounting of committed memory in large object space.
  return CommittedMemory();
}

Object* LargeObjectSpace::FindObject(Address a) {
  LargePage* page = FindPage(a);
  if (page != nullptr) {
    return page->GetObject();
  }
  return Smi::kZero;  // Signaling not found.
}

LargePage* LargeObjectSpace::FindPage(Address a) {
  const Address key = MemoryChunk::FromAddress(a)->address();
  auto it = chunk_map_.find(key);
  if (it != chunk_map_.end()) {
    LargePage* page = it->second;
    if (page->Contains(a)) {
      return page;
    }
  }
  return nullptr;
}
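// The chunk map maintained below maps every MemoryChunk-sized slice of a
// large page to its LargePage, which is what lets FindPage/FindObject above
// resolve an arbitrary interior address with a single hash lookup.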
void LargeObjectSpace::ClearMarkingStateOfLiveObjects() {
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    if (marking_state->IsBlackOrGrey(obj)) {
      Marking::MarkWhite(marking_state->MarkBitFrom(obj));
      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
      chunk->ResetProgressBar();
      marking_state->SetLiveBytes(chunk, 0);
    }
    DCHECK(marking_state->IsWhite(obj));
  }
}

void LargeObjectSpace::InsertChunkMapEntries(LargePage* page) {
  // There may be concurrent access on the chunk map. We have to take the lock
  // here.
  base::MutexGuard guard(&chunk_map_mutex_);
  for (Address current = reinterpret_cast<Address>(page);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_[current] = page;
  }
}

void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page) {
  RemoveChunkMapEntries(page, page->address());
}

void LargeObjectSpace::RemoveChunkMapEntries(LargePage* page,
                                             Address free_start) {
  for (Address current = ::RoundUp(free_start, MemoryChunk::kPageSize);
       current < reinterpret_cast<Address>(page) + page->size();
       current += MemoryChunk::kPageSize) {
    chunk_map_.erase(current);
  }
}

void LargeObjectSpace::PromoteNewLargeObject(LargePage* page) {
  DCHECK_EQ(page->owner()->identity(), NEW_LO_SPACE);
  DCHECK(page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
  DCHECK(!page->IsFlagSet(MemoryChunk::IN_TO_SPACE));
  size_t object_size = static_cast<size_t>(page->GetObject()->Size());
  reinterpret_cast<NewLargeObjectSpace*>(page->owner())
      ->Unregister(page, object_size);
  Register(page, object_size);
  page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
  page->SetOldGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->set_owner(this);
}

void LargeObjectSpace::Register(LargePage* page, size_t object_size) {
  size_ += static_cast<int>(page->size());
  AccountCommitted(page->size());
  objects_size_ += object_size;
  page_count_++;
  memory_chunk_list_.PushBack(page);

  InsertChunkMapEntries(page);
}

void LargeObjectSpace::Unregister(LargePage* page, size_t object_size) {
  size_ -= static_cast<int>(page->size());
  AccountUncommitted(page->size());
  objects_size_ -= object_size;
  page_count_--;
  memory_chunk_list_.Remove(page);

  RemoveChunkMapEntries(page);
}
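// After marking, FreeUnmarkedObjects walks all large pages: live (black)
// objects may have their page partially released past the live range, while
// unmarked pages are unregistered and returned to the memory allocator.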
void LargeObjectSpace::FreeUnmarkedObjects() {
  LargePage* current = first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  // Right-trimming does not update the objects_size_ counter. We are lazily
  // updating it after every GC.
  objects_size_ = 0;
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject* object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    if (marking_state->IsBlack(object)) {
      Address free_start;
      size_t size = static_cast<size_t>(object->Size());
      objects_size_ += size;
      if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
          0) {
        DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
        current->ClearOutOfLiveRangeSlots(free_start);
        RemoveChunkMapEntries(current, free_start);
        const size_t bytes_to_free =
            current->size() - (free_start - current->address());
        heap()->memory_allocator()->PartialFreeMemory(
            current, free_start, bytes_to_free,
            current->area_start() + object->Size());
        size_ -= bytes_to_free;
        AccountUncommitted(bytes_to_free);
      }
    } else {
      memory_chunk_list_.Remove(current);

      // Free the chunk.
      size_ -= static_cast<int>(current->size());
      AccountUncommitted(current->size());

      RemoveChunkMapEntries(current);
      heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
          current);
    }
    current = next_current;
  }
}

bool LargeObjectSpace::Contains(HeapObject* object) {
  Address address = object->address();
  MemoryChunk* chunk = MemoryChunk::FromAddress(address);

  bool owned = (chunk->owner() == this);

  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());

  return owned;
}

std::unique_ptr<ObjectIterator> LargeObjectSpace::GetObjectIterator() {
  return std::unique_ptr<ObjectIterator>(new LargeObjectIterator(this));
}
void LargeObjectSpace::Verify(Isolate* isolate) {
  size_t external_backing_store_bytes[kNumTypes];

  for (int i = 0; i < kNumTypes; i++) {
    external_backing_store_bytes[static_cast<ExternalBackingStoreType>(i)] = 0;
  }

  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    HeapObject* object = chunk->GetObject();
    Page* page = Page::FromAddress(object->address());
    CHECK(object->address() == page->area_start());

    Map map = object->map();
    CHECK(map->IsMap());
    CHECK(heap()->map_space()->Contains(map) ||
          heap()->read_only_space()->Contains(map));

    CHECK(object->IsAbstractCode() || object->IsSeqString() ||
          object->IsExternalString() || object->IsThinString() ||
          object->IsFixedArray() || object->IsFixedDoubleArray() ||
          object->IsWeakFixedArray() || object->IsWeakArrayList() ||
          object->IsPropertyArray() || object->IsByteArray() ||
          object->IsFeedbackVector() || object->IsBigInt() ||
          object->IsFreeSpace() || object->IsFeedbackMetadata() ||
          object->IsContext());

    object->ObjectVerify(isolate);

    if (!FLAG_verify_heap_skip_remembered_set) {
      heap()->VerifyRememberedSetFor(object);
    }

    if (object->IsAbstractCode()) {
      VerifyPointersVisitor code_visitor(heap());
      object->IterateBody(map, object->Size(), &code_visitor);
    } else if (object->IsFixedArray()) {
      FixedArray array = FixedArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* element = array->get(j);
        if (element->IsHeapObject()) {
          HeapObject* element_object = HeapObject::cast(element);
          CHECK(heap()->Contains(element_object));
          CHECK(element_object->map()->IsMap());
        }
      }
    } else if (object->IsPropertyArray()) {
      PropertyArray array = PropertyArray::cast(object);
      for (int j = 0; j < array->length(); j++) {
        Object* property = array->get(j);
        if (property->IsHeapObject()) {
          HeapObject* property_object = HeapObject::cast(property);
          CHECK(heap()->Contains(property_object));
          CHECK(property_object->map()->IsMap());
        }
      }
    }

    for (int i = 0; i < kNumTypes; i++) {
      ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
      external_backing_store_bytes[t] += chunk->ExternalBackingStoreBytes(t);
    }
  }
  for (int i = 0; i < kNumTypes; i++) {
    ExternalBackingStoreType t = static_cast<ExternalBackingStoreType>(i);
    CHECK_EQ(external_backing_store_bytes[t], ExternalBackingStoreBytes(t));
  }
}
void LargeObjectSpace::Print() {
  StdoutStream os;
  LargeObjectIterator it(this);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    obj->Print(os);
  }
}

void Page::Print() {
  // Make a best-effort to print the objects in the page.
  PrintF("Page@%p in %s\n", reinterpret_cast<void*>(this->address()),
         this->owner()->name());
  printf(" --------------------------------------\n");
  HeapObjectIterator objects(this);
  unsigned mark_size = 0;
  for (HeapObject* object = objects.Next(); object != nullptr;
       object = objects.Next()) {
    bool is_marked =
        heap()->incremental_marking()->marking_state()->IsBlackOrGrey(object);
    PrintF(" %c ", (is_marked ? '!' : ' '));
    if (is_marked) {
      mark_size += object->Size();
    }
    object->ShortPrint();
    PrintF("\n");
  }
  printf(" --------------------------------------\n");
  printf(" Marked: %x, LiveCount: %" V8PRIdPTR "\n", mark_size,
         heap()->incremental_marking()->marking_state()->live_bytes(this));
}
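// NewLargeObjectSpace holds young-generation large objects: freshly allocated
// pages carry the IN_TO_SPACE flag, Flip moves them all to from-space during
// a scavenge, and survivors are promoted via PromoteNewLargeObject above.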
NewLargeObjectSpace::NewLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, NEW_LO_SPACE) {}

AllocationResult NewLargeObjectSpace::AllocateRaw(int object_size) {
  LargePage* page = AllocateLargePage(object_size, NOT_EXECUTABLE);
  if (page == nullptr) return AllocationResult::Retry(identity());
  page->SetYoungGenerationPageFlags(heap()->incremental_marking()->IsMarking());
  page->SetFlag(MemoryChunk::IN_TO_SPACE);
  page->InitializationMemoryFence();
  return page->GetObject();
}

size_t NewLargeObjectSpace::Available() {
  // There is no growing strategy for the new large object space yet, so
  // report no available capacity.
  return 0;
}

void NewLargeObjectSpace::Flip() {
  for (LargePage* chunk = first_page(); chunk != nullptr;
       chunk = chunk->next_page()) {
    chunk->SetFlag(MemoryChunk::IN_FROM_SPACE);
    chunk->ClearFlag(MemoryChunk::IN_TO_SPACE);
  }
}

void NewLargeObjectSpace::FreeAllObjects() {
  LargePage* current = first_page();
  while (current) {
    LargePage* next_current = current->next_page();
    Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
    heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
        current);
    current = next_current;
  }
}

CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
    : LargeObjectSpace(heap, CODE_LO_SPACE) {}

AllocationResult CodeLargeObjectSpace::AllocateRaw(int object_size) {
  return LargeObjectSpace::AllocateRaw(object_size, EXECUTABLE);
}