#include "src/heap/mark-compact.h"

#include <unordered_map>

#include "src/base/utils/random-number-generator.h"
#include "src/cancelable-task.h"
#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/deoptimizer.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/global-handles.h"
#include "src/heap/array-buffer-collector.h"
#include "src/heap/array-buffer-tracker-inl.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking-inl.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator-inl.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/spaces-inl.h"
#include "src/heap/sweeper.h"
#include "src/heap/worklist.h"
#include "src/ic/stub-cache.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/js-objects-inl.h"
#include "src/objects/maybe-object.h"
#include "src/objects/slots-inl.h"
#include "src/transitions-inl.h"
#include "src/utils-inl.h"
#include "src/vm-state-inl.h"

const char* Marking::kWhiteBitPattern = "00";
const char* Marking::kBlackBitPattern = "11";
const char* Marking::kGreyBitPattern = "10";
const char* Marking::kImpossibleBitPattern = "01";

STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
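// The verifier classes below are only compiled in VERIFY_HEAP builds. They
// walk the heap after marking (MarkingVerifier) and after evacuation
// (EvacuationVerifier) and CHECK that mark bits and pointers are consistent
// with what the collector expects.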
#ifdef VERIFY_HEAP
namespace {

class MarkingVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

 protected:
  explicit MarkingVerifier(Heap* heap) : heap_(heap) {}

  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;

  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;

  virtual bool IsMarked(HeapObject* object) = 0;

  virtual bool IsBlackOrGrey(HeapObject* object) = 0;

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VerifyRoots(VisitMode mode);
  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
  void VerifyMarking(NewSpace* new_space);
  void VerifyMarking(PagedSpace* paged_space);
  void VerifyMarking(LargeObjectSpace* lo_space);

  Heap* heap_;
};
void MarkingVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}

void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
                                          Address end) {
  HeapObject* object;
  Address next_object_must_be_here_or_later = start;
  for (Address current = start; current < end;) {
    object = HeapObject::FromAddress(current);
    // One word fillers at the end of a black area can be grey.
    if (IsBlackOrGrey(object) &&
        object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
      CHECK(IsMarked(object));
      CHECK(current >= next_object_must_be_here_or_later);
      object->Iterate(this);
      next_object_must_be_here_or_later = current + object->Size();
      // The object is either part of a black area of black allocation or a
      // regular black object.
      CHECK(
          bitmap(page)->AllBitsSetInRange(
              page->AddressToMarkbitIndex(current),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
          bitmap(page)->AllBitsClearInRange(
              page->AddressToMarkbitIndex(current + kPointerSize * 2),
              page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
      current = next_object_must_be_here_or_later;
    } else {
      current += kPointerSize;
    }
  }
}
void MarkingVerifier::VerifyMarking(NewSpace* space) {
  Address end = space->top();
  // The bottom position is at the start of its page. Allows us to use
  // page->area_start() as start of range on all pages.
  CHECK_EQ(space->first_allocatable_address(),
           space->first_page()->area_start());

  PageRange range(space->first_allocatable_address(), end);
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address limit = it != range.end() ? page->area_end() : end;
    CHECK(limit == end || !page->Contains(end));
    VerifyMarkingOnPage(page, page->area_start(), limit);
  }
}

void MarkingVerifier::VerifyMarking(PagedSpace* space) {
  for (Page* p : *space) {
    VerifyMarkingOnPage(p, p->area_start(), p->area_end());
  }
}

void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
  LargeObjectIterator it(lo_space);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    if (IsBlackOrGrey(obj)) {
      obj->Iterate(this);
    }
  }
}
class FullMarkingVerifier : public MarkingVerifier {
 public:
  explicit FullMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->mark_compact_collector()->non_atomic_marking_state()) {}

  void Run() override {
    VerifyRoots(VISIT_ONLY_STRONG);
    VerifyMarking(heap_->new_space());
    VerifyMarking(heap_->old_space());
    VerifyMarking(heap_->code_space());
    VerifyMarking(heap_->map_space());
    VerifyMarking(heap_->lo_space());
    VerifyMarking(heap_->code_lo_space());
  }

 protected:
  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsBlack(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    for (ObjectSlot current = start; current < end; ++current) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject* object;
      if ((*current)->GetHeapObjectIfStrong(&object)) {
        CHECK(marking_state_->IsBlackOrGrey(object));
      }
    }
  }

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    if (!host->IsWeakObject(rinfo->target_object())) {
      Object* p = rinfo->target_object();
      VisitPointer(host, ObjectSlot(&p));
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
 public:
  virtual void Run() = 0;

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    VerifyPointers(start, end);
  }

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) override {
    VerifyPointers(start, end);
  }

 protected:
  explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}

  inline Heap* heap() { return heap_; }

  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;

  void VerifyRoots(VisitMode mode);
  void VerifyEvacuationOnPage(Address start, Address end);
  void VerifyEvacuation(NewSpace* new_space);
  void VerifyEvacuation(PagedSpace* paged_space);

  Heap* heap_;
};

void EvacuationVerifier::VerifyRoots(VisitMode mode) {
  heap_->IterateStrongRoots(this, mode);
}
void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
  Address current = start;
  while (current < end) {
    HeapObject* object = HeapObject::FromAddress(current);
    if (!object->IsFiller()) object->Iterate(this);
    current += object->Size();
  }
}

void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
  PageRange range(space->first_allocatable_address(), space->top());
  for (auto it = range.begin(); it != range.end();) {
    Page* page = *(it++);
    Address current = page->area_start();
    Address limit = it != range.end() ? page->area_end() : space->top();
    CHECK(limit == space->top() || !page->Contains(space->top()));
    VerifyEvacuationOnPage(current, limit);
  }
}

void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
  for (Page* p : *space) {
    if (p->IsEvacuationCandidate()) continue;
    if (p->Contains(space->top())) {
      CodePageMemoryModificationScope memory_modification_scope(p);
      heap_->CreateFillerObjectAt(
          space->top(), static_cast<int>(space->limit() - space->top()),
          ClearRecordedSlots::kNo);
    }
    VerifyEvacuationOnPage(p->area_start(), p->area_end());
  }
}
class FullEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    for (ObjectSlot current = start; current < end; ++current) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (Heap::InNewSpace(object)) {
          CHECK(Heap::InToSpace(object));
        }
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject* object;
      if ((*current)->GetHeapObjectIfStrong(&object)) {
        if (Heap::InNewSpace(object)) {
          CHECK(Heap::InToSpace(object));
        }
        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
      }
    }
  }
};
}  // namespace
#endif  // VERIFY_HEAP

using MarkCompactMarkingVisitor =
    MarkingVisitor<FixedArrayVisitationMode::kRegular,
                   TraceRetainingPathMode::kEnabled,
                   MarkCompactCollector::MarkingState>;
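// Helpers for sizing the parallel compaction and pointer-update work: the
// task counts below are derived from the platform's worker thread count and
// capped by the number of pages (or slots) that need processing.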
int NumberOfAvailableCores() {
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  // This number of cores should not change between invocations.
  DCHECK_GE(num_cores, 1);
  DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
  return num_cores;
}

int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
  int tasks =
      FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
  if (!heap_->CanExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the old generation limit.
    tasks = 1;
  }
  return tasks;
}

int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
                                                                 int slots) {
  // Limit the number of update tasks as task creation often dominates the
  // actual work that is being done.
  const int kMaxPointerUpdateTasks = 8;
  const int kSlotsPerTask = 600;
  const int wanted_tasks =
      (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
  return FLAG_parallel_pointer_update
             ? Min(kMaxPointerUpdateTasks,
                   Min(NumberOfAvailableCores(), wanted_tasks))
             : 1;
}

int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
    int pages) {
  return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
                                      : 1;
}
MarkCompactCollector::MarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      page_parallel_job_semaphore_(0),
      was_marked_incrementally_(false),
      black_allocation_(false),
      have_code_to_deoptimize_(false),
      marking_worklist_(heap),
      sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
  old_to_new_slots_ = -1;
}

MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
void MarkCompactCollector::SetUp() {
  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
}

void MarkCompactCollector::TearDown() {
  if (heap()->incremental_marking()->IsMarking()) {
    marking_worklist()->Clear();
  }
}

void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
  DCHECK(!p->NeverEvacuate());
  p->MarkEvacuationCandidate();
  evacuation_candidates_.push_back(p);
}
static void TraceFragmentation(PagedSpace* space) {
  int number_of_pages = space->CountTotalPages();
  intptr_t reserved = (number_of_pages * space->AreaSize());
  intptr_t free = reserved - space->SizeOfObjects();
  PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
}

bool MarkCompactCollector::StartCompaction() {
  if (!compacting_) {
    DCHECK(evacuation_candidates_.empty());

    CollectEvacuationCandidates(heap()->old_space());

    if (FLAG_compact_code_space) {
      CollectEvacuationCandidates(heap()->code_space());
    } else if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->code_space());
    }

    if (FLAG_trace_fragmentation) {
      TraceFragmentation(heap()->map_space());
    }

    compacting_ = !evacuation_candidates_.empty();
  }

  return compacting_;
}
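// CollectGarbage() drives one full mark-compact cycle; Prepare() must have
// been called beforehand (see the state_ DCHECK below).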
void MarkCompactCollector::CollectGarbage() {
  // Make sure that Prepare() has been called. The individual steps below will
  // update the state as they proceed.
  DCHECK(state_ == PREPARE_GC);

#ifdef ENABLE_MINOR_MC
  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
#endif  // ENABLE_MINOR_MC

  MarkLiveObjects();
  ClearNonLiveReferences();
  VerifyMarking();

  RecordObjectStats();

  StartSweepSpaces();

  Evacuate();

  Finish();
}
#ifdef VERIFY_HEAP
void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
  HeapObjectIterator iterator(space);
  while (HeapObject* object = iterator.Next()) {
    CHECK(non_atomic_marking_state()->IsBlack(object));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
  for (Page* p : *space) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
  for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
    CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
  LargeObjectIterator it(space);
  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
    CHECK(non_atomic_marking_state()->IsWhite(obj));
    CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
                    MemoryChunk::FromAddress(obj->address())));
  }
}

void MarkCompactCollector::VerifyMarkbitsAreClean() {
  VerifyMarkbitsAreClean(heap_->old_space());
  VerifyMarkbitsAreClean(heap_->code_space());
  VerifyMarkbitsAreClean(heap_->map_space());
  VerifyMarkbitsAreClean(heap_->new_space());
  // Read-only space should always be black since we never collect any objects
  // in it or linked from it.
  VerifyMarkbitsAreDirty(heap_->read_only_space());
  VerifyMarkbitsAreClean(heap_->lo_space());
  VerifyMarkbitsAreClean(heap_->code_lo_space());
  VerifyMarkbitsAreClean(heap_->new_lo_space());
}
#endif  // VERIFY_HEAP

void MarkCompactCollector::EnsureSweepingCompleted() {
  if (!sweeper()->sweeping_in_progress()) return;

  sweeper()->EnsureCompleted();
  heap()->old_space()->RefillFreeList();
  heap()->code_space()->RefillFreeList();
  heap()->map_space()->RefillFreeList();

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !evacuation()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif
}
void MarkCompactCollector::ComputeEvacuationHeuristics(
    size_t area_size, int* target_fragmentation_percent,
    size_t* max_evacuated_bytes) {
  // For memory-reducing and optimize-for-memory modes we directly define both
  // constants.
  const int kTargetFragmentationPercentForReduceMemory = 20;
  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
  const int kTargetFragmentationPercentForOptimizeMemory = 20;
  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;

  // For regular mode (which is latency critical) we define less aggressive
  // defaults to start with and switch to a trace-based (using compaction
  // speed) approach as soon as we have enough samples.
  const int kTargetFragmentationPercent = 70;
  const size_t kMaxEvacuatedBytes = 4 * MB;
  // Time to take for a single area (=payload of page). Used as soon as there
  // exist enough compaction speed samples.
  const float kTargetMsPerArea = .5;

  if (heap()->ShouldReduceMemory()) {
    *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
    *target_fragmentation_percent =
        kTargetFragmentationPercentForOptimizeMemory;
    *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
  } else {
    const double estimated_compaction_speed =
        heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
    if (estimated_compaction_speed != 0) {
      // Estimate the target fragmentation based on traced compaction speed
      // and a goal for a single page.
      const double estimated_ms_per_area =
          1 + area_size / estimated_compaction_speed;
      *target_fragmentation_percent = static_cast<int>(
          100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
      if (*target_fragmentation_percent <
          kTargetFragmentationPercentForReduceMemory) {
        *target_fragmentation_percent =
            kTargetFragmentationPercentForReduceMemory;
      }
    } else {
      *target_fragmentation_percent = kTargetFragmentationPercent;
    }
    *max_evacuated_bytes = kMaxEvacuatedBytes;
  }
}
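// Evacuation candidates are chosen per space: a few testing/stress modes pick
// pages directly, otherwise pages are sorted by live bytes and selected until
// either the free-bytes threshold or the maximum evacuated-bytes budget
// computed above is exceeded.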
void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);

  int number_of_pages = space->CountTotalPages();
  size_t area_size = space->AreaSize();

  // Pairs of (live_bytes_in_page, page).
  typedef std::pair<size_t, Page*> LiveBytesPagePair;
  std::vector<LiveBytesPagePair> pages;
  pages.reserve(number_of_pages);

  DCHECK(!sweeping_in_progress());
  Page* owner_of_linear_allocation_area =
      space->top() == space->limit()
          ? nullptr
          : Page::FromAllocationAreaAddress(space->top());
  for (Page* p : *space) {
    if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area)) continue;
    // Invariant: Evacuation candidates are just created when marking is
    // started. This means that sweeping has finished. Furthermore, at the end
    // of a GC all evacuation candidates are cleared and their slot buffers are
    // released.
    CHECK(!p->IsEvacuationCandidate());
    CHECK_NULL(p->slot_set<OLD_TO_OLD>());
    CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
    CHECK(p->SweepingDone());
    DCHECK(p->area_size() == area_size);
    pages.push_back(std::make_pair(p->allocated_bytes(), p));
  }

  int candidate_count = 0;
  size_t total_live_bytes = 0;

  const bool reduce_memory = heap()->ShouldReduceMemory();
  if (FLAG_manual_evacuation_candidates_selection) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
        AddEvacuationCandidate(p);
      }
    }
  } else if (FLAG_stress_compaction_random) {
    double fraction = isolate()->fuzzer_rng()->NextDouble();
    size_t pages_to_mark_count =
        static_cast<size_t>(fraction * (pages.size() + 1));
    for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
             pages.size(), pages_to_mark_count)) {
      candidate_count++;
      total_live_bytes += pages[i].first;
      AddEvacuationCandidate(pages[i].second);
    }
  } else if (FLAG_stress_compaction) {
    for (size_t i = 0; i < pages.size(); i++) {
      Page* p = pages[i].second;
      if (i % 2 == 0) {
        candidate_count++;
        total_live_bytes += pages[i].first;
        AddEvacuationCandidate(p);
      }
    }
  } else {
    // Sort pages from most free to least free, then select the first n pages
    // such that the total size of evacuated objects and the fragmentation of
    // the (n+1)-th page stay within the limits computed by the heuristics.
    size_t max_evacuated_bytes;
    int target_fragmentation_percent;
    ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
                                &max_evacuated_bytes);

    const size_t free_bytes_threshold =
        target_fragmentation_percent * (area_size / 100);

    std::sort(pages.begin(), pages.end(),
              [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
                return a.first < b.first;
              });
    for (size_t i = 0; i < pages.size(); i++) {
      size_t live_bytes = pages[i].first;
      DCHECK_GE(area_size, live_bytes);
      size_t free_bytes = area_size - live_bytes;
      if (FLAG_always_compact ||
          ((free_bytes >= free_bytes_threshold) &&
           ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
        candidate_count++;
        total_live_bytes += live_bytes;
      }
      if (FLAG_trace_fragmentation_verbose) {
        PrintIsolate(isolate(),
                     "compaction-selection-page: space=%s free_bytes_page=%zu "
                     "fragmentation_limit_kb=%" PRIuS
                     " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
                     "compaction_limit_kb=%zu\n",
                     space->name(), free_bytes / KB, free_bytes_threshold / KB,
                     target_fragmentation_percent, total_live_bytes / KB,
                     max_evacuated_bytes / KB);
      }
    }
    // How many pages we will allocate for the evacuated objects
    // in the worst case: ceil(total_live_bytes / area_size)
    int estimated_new_pages =
        static_cast<int>((total_live_bytes + area_size - 1) / area_size);
    DCHECK_LE(estimated_new_pages, candidate_count);
    int estimated_released_pages = candidate_count - estimated_new_pages;
    // Avoid (compact -> expand) cycles.
    if ((estimated_released_pages == 0) && !FLAG_always_compact) {
      candidate_count = 0;
    }
    for (int i = 0; i < candidate_count; i++) {
      AddEvacuationCandidate(pages[i].second);
    }
  }

  if (FLAG_trace_fragmentation) {
    PrintIsolate(isolate(),
                 "compaction-selection: space=%s reduce_memory=%d pages=%d "
                 "total_live_bytes=%zu\n",
                 space->name(), reduce_memory, candidate_count,
                 total_live_bytes / KB);
  }
}
void MarkCompactCollector::AbortCompaction() {
  if (compacting_) {
    RememberedSet<OLD_TO_OLD>::ClearAll(heap());
    for (Page* p : evacuation_candidates_) {
      p->ClearEvacuationCandidate();
    }
    compacting_ = false;
    evacuation_candidates_.clear();
  }
  DCHECK(evacuation_candidates_.empty());
}
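// Prepare() transitions the collector out of IDLE, finishes any outstanding
// sweeping, stops stale incremental-marking state, and selects evacuation
// candidates unless marking already started incrementally.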
void MarkCompactCollector::Prepare() {
  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();

#ifdef DEBUG
  DCHECK(state_ == IDLE);
  state_ = PREPARE_GC;
#endif

  DCHECK(!FLAG_never_compact || !FLAG_always_compact);

  // Instead of waiting we could also abort the sweeper threads here.
  EnsureSweepingCompleted();

  if (heap()->incremental_marking()->IsSweeping()) {
    heap()->incremental_marking()->Stop();
  }

  heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();

  if (!was_marked_incrementally_) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
    heap_->local_embedder_heap_tracer()->TracePrologue();
  }

  // Don't start compaction if we are in the middle of incremental marking.
  if (!FLAG_never_compact && !was_marked_incrementally_) {
    StartCompaction();
  }

  PagedSpaces spaces(heap());
  for (PagedSpace* space = spaces.next(); space != nullptr;
       space = spaces.next()) {
    space->PrepareForMarkCompact();
  }
  heap()->account_external_memory_concurrently_freed();

#ifdef VERIFY_HEAP
  if (!was_marked_incrementally_ && FLAG_verify_heap) {
    VerifyMarkbitsAreClean();
  }
#endif
}
void MarkCompactCollector::FinishConcurrentMarking(
    ConcurrentMarking::StopRequest stop_request) {
  // FinishConcurrentMarking is called for both, concurrent and parallel,
  // marking. It is safe to call this function when tasks are already finished.
  if (FLAG_parallel_marking || FLAG_concurrent_marking) {
    heap()->concurrent_marking()->Stop(stop_request);
    heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
  }
}

void MarkCompactCollector::VerifyMarking() {
  CHECK(marking_worklist()->IsEmpty());
  DCHECK(heap_->incremental_marking()->IsStopped());
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    FullMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    heap()->old_space()->VerifyLiveBytes();
    heap()->map_space()->VerifyLiveBytes();
    heap()->code_space()->VerifyLiveBytes();
  }
#endif
}
void MarkCompactCollector::Finish() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);

#ifdef DEBUG
  heap()->VerifyCountersBeforeConcurrentSweeping();
#endif

  CHECK(weak_objects_.current_ephemerons.IsEmpty());
  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
  weak_objects_.next_ephemerons.Clear();

  sweeper()->StartSweeperTasks();
  sweeper()->StartIterabilityTasks();

  // Clear the marking state of live large objects.
  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();

#ifdef DEBUG
  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
  state_ = IDLE;
#endif
  heap_->isolate()->inner_pointer_to_code_cache()->Flush();

  // The stub caches are not traversed during GC; clear them to force
  // their lazy re-initialization. This must be done after the GC, because it
  // relies on the new address of certain old space objects (empty string,
  // illegal builtin).
  isolate()->load_stub_cache()->Clear();
  isolate()->store_stub_cache()->Clear();

  if (have_code_to_deoptimize_) {
    // Some code objects were marked for deoptimization during the GC.
    Deoptimizer::DeoptimizeMarkedCode(isolate());
    have_code_to_deoptimize_ = false;
  }
}
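// Root visitors: RootMarkingVisitor marks objects directly reachable from the
// roots, while CustomRootBodyMarkingVisitor is used for objects whose bodies
// are treated as roots (string table prefix, top optimized frame).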
class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitRootPointer(Root root, const char* description,
                        ObjectSlot p) final {
    MarkObjectByPointer(root, p);
  }

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
  }

 private:
  V8_INLINE void MarkObjectByPointer(Root root, ObjectSlot p) {
    if (!(*p)->IsHeapObject()) return;

    collector_->MarkRootObject(root, HeapObject::cast(*p));
  }

  MarkCompactCollector* const collector_;
};
class MarkCompactCollector::CustomRootBodyMarkingVisitor final
    : public ObjectVisitor {
 public:
  explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitPointer(HeapObject* host, ObjectSlot p) final {
    MarkObject(host, *p);
  }

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) {
      DCHECK(!HasWeakHeapObjectTag(*p));
      MarkObject(host, *p);
    }
  }

 private:
  V8_INLINE void MarkObject(HeapObject* host, Object* object) {
    if (!object->IsHeapObject()) return;
    collector_->MarkObject(host, HeapObject::cast(object));
  }

  MarkCompactCollector* const collector_;
};
class InternalizedStringTableCleaner : public ObjectVisitor {
 public:
  InternalizedStringTableCleaner(Heap* heap, StringTable table)
      : heap_(heap), pointers_removed_(0), table_(table) {}

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    // Visit all HeapObject pointers in [start, end).
    Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    for (ObjectSlot p = start; p < end; ++p) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          pointers_removed_++;
          // Set the entry to the_hole_value (as deleted).
          p.store(the_hole);
        } else {
          // StringTable contains only old space strings.
          DCHECK(!Heap::InNewSpace(o));
          MarkCompactCollector::RecordSlot(table_, p, heap_object);
        }
      }
    }
  }

  int PointersRemoved() { return pointers_removed_; }

 private:
  Heap* heap_;
  int pointers_removed_;
  StringTable table_;
};
class ExternalStringTableCleaner : public RootVisitor {
 public:
  explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) override {
    // Visit all HeapObject pointers in [start, end).
    MarkCompactCollector::NonAtomicMarkingState* marking_state =
        heap_->mark_compact_collector()->non_atomic_marking_state();
    Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
    for (ObjectSlot p = start; p < end; ++p) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state->IsWhite(heap_object)) {
          if (o->IsExternalString()) {
            heap_->FinalizeExternalString(String::cast(o));
          } else {
            // The original external string may have been internalized.
            DCHECK(o->IsThinString());
          }
          // Set the entry to the_hole_value (as deleted).
          p.store(the_hole);
        }
      }
    }
  }

 private:
  Heap* heap_;
};
// Implementation of WeakObjectRetainer for mark compact GCs. All marked
// objects are retained.
class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit MarkCompactWeakObjectRetainer(
      MarkCompactCollector::NonAtomicMarkingState* marking_state)
      : marking_state_(marking_state) {}

  virtual Object* RetainAs(Object* object) {
    HeapObject* heap_object = HeapObject::cast(object);
    DCHECK(!marking_state_->IsGrey(heap_object));
    if (marking_state_->IsBlack(heap_object)) {
      return object;
    } else if (object->IsAllocationSite() &&
               !(AllocationSite::cast(object)->IsZombie())) {
      // "Dead" AllocationSites need to live long enough for a traversal of new
      // space. These sites get a one-time reprieve.
      Object* nested = object;
      while (nested->IsAllocationSite()) {
        AllocationSite* current_site = AllocationSite::cast(nested);
        // MarkZombie will override the nested_site, read it first before
        // marking.
        nested = current_site->nested_site();
        current_site->MarkZombie();
        marking_state_->WhiteToBlack(current_site);
      }

      return object;
    } else {
      return nullptr;
    }
  }

 private:
  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
};
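// RecordMigratedSlotVisitor re-records slots of objects that were migrated
// during evacuation, so the old-to-new and old-to-old remembered sets stay
// valid for the pointer-updating phase.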
class RecordMigratedSlotVisitor : public ObjectVisitor {
 public:
  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
      : collector_(collector) {}

  inline void VisitPointer(HeapObject* host, ObjectSlot p) final {
    DCHECK(!HasWeakHeapObjectTag(*p));
    RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
  }

  inline void VisitPointer(HeapObject* host, MaybeObjectSlot p) final {
    RecordMigratedSlot(host, *p, p.address());
  }

  inline void VisitPointers(HeapObject* host, ObjectSlot start,
                            ObjectSlot end) final {
    while (start < end) {
      VisitPointer(host, start);
      ++start;
    }
  }

  inline void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                            MaybeObjectSlot end) final {
    while (start < end) {
      VisitPointer(host, start);
      ++start;
    }
  }

  inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
    Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
    // The target is always in old space, we don't have to record the slot in
    // the old-to-new remembered set.
    DCHECK(!Heap::InNewSpace(target));
    collector_->RecordRelocSlot(host, rinfo, target);
  }

  inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    DCHECK_EQ(host, rinfo->host());
    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
    HeapObject* object = HeapObject::cast(rinfo->target_object());
    GenerationalBarrierForCode(host, rinfo, object);
    collector_->RecordRelocSlot(host, rinfo, object);
  }

  // Entries that are skipped for recording.
  inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
  inline void VisitExternalReference(Foreign* host, Address* p) final {}
  inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
  inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}

 protected:
  inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject value,
                                         Address slot) {
    if (value->IsStrongOrWeak()) {
      Page* p = Page::FromAddress(value.ptr());
      if (p->InNewSpace()) {
        DCHECK_IMPLIES(p->InToSpace(),
                       p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      } else if (p->IsEvacuationCandidate()) {
        RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      }
    }
  }

  MarkCompactCollector* collector_;
};
class MigrationObserver {
 public:
  explicit MigrationObserver(Heap* heap) : heap_(heap) {}

  virtual ~MigrationObserver() = default;
  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                    int size) = 0;

 protected:
  Heap* heap_;
};

class ProfilingMigrationObserver final : public MigrationObserver {
 public:
  explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
      PROFILE(heap_->isolate(),
              CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
    }
    heap_->OnMoveEvent(dst, src, size);
  }
};

class HeapObjectVisitor {
 public:
  virtual ~HeapObjectVisitor() = default;
  virtual bool Visit(HeapObject* object, int size) = 0;
};

class EvacuateVisitorBase : public HeapObjectVisitor {
 public:
  void AddObserver(MigrationObserver* observer) {
    migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
    observers_.push_back(observer);
  }

 protected:
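  // RawMigrateObject copies the object payload to its new location, notifies
  // migration observers (profiling) when enabled, and finally writes the
  // forwarding address over the source object's map word.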
  enum MigrationMode { kFast, kObserved };

  typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
                                  HeapObject* src, int size,
                                  AllocationSpace dest);

  template <MigrationMode mode>
  static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
                               HeapObject* src, int size,
                               AllocationSpace dest) {
    Address dst_addr = dst->address();
    Address src_addr = src->address();
    DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
    DCHECK_NE(dest, LO_SPACE);
    DCHECK_NE(dest, CODE_LO_SPACE);
    if (dest == OLD_SPACE) {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(IsAligned(size, kPointerSize));
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    } else if (dest == CODE_SPACE) {
      DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      Code::cast(dst)->Relocate(dst_addr - src_addr);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
      dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
    } else {
      DCHECK_OBJECT_SIZE(size);
      DCHECK(dest == NEW_SPACE);
      base->heap_->CopyBlock(dst_addr, src_addr, size);
      if (mode != MigrationMode::kFast)
        base->ExecuteMigrationObservers(dest, src, dst, size);
    }
    base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
                        static_cast<base::AtomicWord>(dst_addr));
  }
  EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
                      RecordMigratedSlotVisitor* record_visitor)
      : heap_(heap),
        local_allocator_(local_allocator),
        record_visitor_(record_visitor) {
    migration_function_ = RawMigrateObject<MigrationMode::kFast>;
  }

  inline bool TryEvacuateObject(AllocationSpace target_space,
                                HeapObject* object, int size,
                                HeapObject** target_object) {
#ifdef VERIFY_HEAP
    if (AbortCompactionForTesting(object)) return false;
#endif  // VERIFY_HEAP
    AllocationAlignment alignment =
        HeapObject::RequiredAlignment(object->map());
    AllocationResult allocation =
        local_allocator_->Allocate(target_space, size, alignment);
    if (allocation.To(target_object)) {
      MigrateObject(*target_object, object, size, target_space);
      return true;
    }
    return false;
  }

  inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
                                        HeapObject* dst, int size) {
    for (MigrationObserver* obs : observers_) {
      obs->Move(dest, src, dst, size);
    }
  }

  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
                            AllocationSpace dest) {
    migration_function_(this, dst, src, size, dest);
  }

#ifdef VERIFY_HEAP
  bool AbortCompactionForTesting(HeapObject* object) {
    if (FLAG_stress_compaction) {
      const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
                             kPageAlignmentMask & ~kObjectAlignmentMask;
      if ((object->address() & kPageAlignmentMask) == mask) {
        Page* page = Page::FromAddress(object->address());
        if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
          page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
        } else {
          page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
          return true;
        }
      }
    }
    return false;
  }
#endif  // VERIFY_HEAP

  Heap* heap_;
  LocalAllocator* local_allocator_;
  RecordMigratedSlotVisitor* record_visitor_;
  std::vector<MigrationObserver*> observers_;
  MigrateFunction migration_function_;
};
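// EvacuateNewSpaceVisitor either promotes a new-space object to old space or
// copies it within the semispaces; thin strings can be "evacuated" without a
// copy by forwarding them to their actual string.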
class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
 public:
  explicit EvacuateNewSpaceVisitor(
      Heap* heap, LocalAllocator* local_allocator,
      RecordMigratedSlotVisitor* record_visitor,
      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor),
        buffer_(LocalAllocationBuffer::InvalidBuffer()),
        promoted_size_(0),
        semispace_copied_size_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback),
        is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}

  inline bool Visit(HeapObject* object, int size) override {
    if (TryEvacuateWithoutCopy(object)) return true;
    HeapObject* target_object = nullptr;
    if (heap_->ShouldBePromoted(object->address()) &&
        TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
      promoted_size_ += size;
      return true;
    }
    heap_->UpdateAllocationSite(object->map(), object,
                                local_pretenuring_feedback_);
    HeapObject* target = nullptr;
    AllocationSpace space = AllocateTargetObject(object, size, &target);
    MigrateObject(HeapObject::cast(target), object, size, space);
    semispace_copied_size_ += size;
    return true;
  }

  intptr_t promoted_size() { return promoted_size_; }
  intptr_t semispace_copied_size() { return semispace_copied_size_; }

 private:
  inline bool TryEvacuateWithoutCopy(HeapObject* object) {
    if (is_incremental_marking_) return false;

    Map map = object->map();

    // Some objects can be evacuated without creating a copy.
    if (map->visitor_id() == kVisitThinString) {
      HeapObject* actual = ThinString::cast(object)->unchecked_actual();
      if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
      object->map_slot().Relaxed_Store(
          MapWord::FromForwardingAddress(actual).ToMap());
      return true;
    }

    return false;
  }

  inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
                                              HeapObject** target_object) {
    AllocationAlignment alignment =
        HeapObject::RequiredAlignment(old_object->map());
    AllocationSpace space_allocated_in = NEW_SPACE;
    AllocationResult allocation =
        local_allocator_->Allocate(NEW_SPACE, size, alignment);
    if (allocation.IsRetry()) {
      allocation = AllocateInOldSpace(size, alignment);
      space_allocated_in = OLD_SPACE;
    }
    bool ok = allocation.To(target_object);
    DCHECK(ok);
    USE(ok);
    return space_allocated_in;
  }

  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
                                             AllocationAlignment alignment) {
    AllocationResult allocation =
        local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
    if (allocation.IsRetry()) {
      heap_->FatalProcessOutOfMemory(
          "MarkCompactCollector: semi-space copy, fallback in old gen");
    }
    return allocation;
  }

  LocalAllocationBuffer buffer_;
  intptr_t promoted_size_;
  intptr_t semispace_copied_size_;
  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
  bool is_incremental_marking_;
};
template <PageEvacuationMode mode>
class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateNewSpacePageVisitor(
      Heap* heap, RecordMigratedSlotVisitor* record_visitor,
      Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
      : heap_(heap),
        record_visitor_(record_visitor),
        moved_bytes_(0),
        local_pretenuring_feedback_(local_pretenuring_feedback) {}

  static void Move(Page* page) {
    switch (mode) {
      case NEW_TO_NEW:
        page->heap()->new_space()->MovePageFromSpaceToSpace(page);
        page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
        break;
      case NEW_TO_OLD: {
        page->heap()->new_space()->from_space().RemovePage(page);
        Page* new_page = Page::ConvertNewToOld(page);
        DCHECK(!new_page->InNewSpace());
        new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
        break;
      }
    }
  }

  inline bool Visit(HeapObject* object, int size) override {
    if (mode == NEW_TO_NEW) {
      heap_->UpdateAllocationSite(object->map(), object,
                                  local_pretenuring_feedback_);
    } else if (mode == NEW_TO_OLD) {
      object->IterateBodyFast(record_visitor_);
    }
    return true;
  }

  intptr_t moved_bytes() { return moved_bytes_; }
  void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }

 private:
  Heap* heap_;
  RecordMigratedSlotVisitor* record_visitor_;
  intptr_t moved_bytes_;
  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
};
class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
 public:
  EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
                          RecordMigratedSlotVisitor* record_visitor)
      : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}

  inline bool Visit(HeapObject* object, int size) override {
    HeapObject* target_object = nullptr;
    if (TryEvacuateObject(
            Page::FromAddress(object->address())->owner()->identity(), object,
            size, &target_object)) {
      DCHECK(object->map_word().IsForwardingAddress());
      return true;
    }
    return false;
  }
};

class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
 public:
  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}

  inline bool Visit(HeapObject* object, int size) override {
    RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
    object->IterateBodyFast(&visitor);
    return true;
  }

 private:
  Heap* heap_;
};
bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, ObjectSlot p) {
  Object* o = *p;
  if (!o->IsHeapObject()) return false;
  HeapObject* heap_object = HeapObject::cast(o);
  return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
      heap_object);
}

void MarkCompactCollector::MarkStringTable(
    ObjectVisitor* custom_root_body_visitor) {
  StringTable string_table = heap()->string_table();
  // Mark the string table itself.
  if (marking_state()->WhiteToBlack(string_table)) {
    // Explicitly mark the prefix.
    string_table->IteratePrefix(custom_root_body_visitor);
  }
}

void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
                                     ObjectVisitor* custom_root_body_visitor) {
  // Mark the heap roots including global variables, stack variables,
  // etc., and all objects reachable from them.
  heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);

  // Custom marking for string table and top optimized frame.
  MarkStringTable(custom_root_body_visitor);
  ProcessTopOptimizedFrame(custom_root_body_visitor);
}
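// Ephemeron (WeakMap) marking: values are only marked once their keys are
// known to be reachable, iterating to a fixpoint and falling back to a linear
// algorithm if the fixpoint does not converge quickly enough.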
void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
  bool work_to_do = true;
  int iterations = 0;
  int max_iterations = FLAG_ephemeron_fixpoint_iterations;

  while (work_to_do) {
    PerformWrapperTracing();

    if (iterations >= max_iterations) {
      // Give up fixpoint iteration and switch to linear algorithm.
      ProcessEphemeronsLinear();
      break;
    }

    // Move ephemerons from next_ephemerons into current_ephemerons to
    // drain them in this iteration.
    weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
    heap()->concurrent_marking()->set_ephemeron_marked(false);

    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);

      if (FLAG_parallel_marking) {
        heap_->concurrent_marking()->RescheduleTasksIfNeeded();
      }

      work_to_do = ProcessEphemerons();
      FinishConcurrentMarking(
          ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
    }

    CHECK(weak_objects_.current_ephemerons.IsEmpty());
    CHECK(weak_objects_.discovered_ephemerons.IsEmpty());

    work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
                 heap()->concurrent_marking()->ephemeron_marked() ||
                 !marking_worklist()->IsEmbedderEmpty() ||
                 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    ++iterations;
  }

  CHECK(marking_worklist()->IsEmpty());
  CHECK(weak_objects_.current_ephemerons.IsEmpty());
  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
}

bool MarkCompactCollector::ProcessEphemerons() {
  Ephemeron ephemeron;
  bool ephemeron_marked = false;

  // Drain current_ephemerons and push ephemerons where key and value are
  // still unreachable into next_ephemerons.
  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
      ephemeron_marked = true;
    }
  }

  // Drain the marking worklist; this pushes discovered ephemerons into
  // discovered_ephemerons.
  ProcessMarkingWorklist();

  // Drain discovered_ephemerons and push ephemerons where key and value are
  // still unreachable into next_ephemerons.
  while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
    if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
      ephemeron_marked = true;
    }
  }

  // Flush local ephemerons for main task to global pool.
  weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);

  return ephemeron_marked;
}
void MarkCompactCollector::ProcessEphemeronsLinear() {
  TRACE_GC(heap()->tracer(),
           GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
  CHECK(heap()->concurrent_marking()->IsStopped());
  std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
  Ephemeron ephemeron;

  DCHECK(weak_objects_.current_ephemerons.IsEmpty());
  weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);

  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
    VisitEphemeron(ephemeron.key, ephemeron.value);

    if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
      key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
    }
  }

  ephemeron_marking_.newly_discovered_limit = key_to_values.size();
  bool work_to_do = true;

  while (work_to_do) {
    PerformWrapperTracing();

    ResetNewlyDiscovered();
    ephemeron_marking_.newly_discovered_limit = key_to_values.size();

    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
      // Drain the marking worklist and push all discovered objects into
      // newly_discovered.
      ProcessMarkingWorklistInternal<
          MarkCompactCollector::MarkingWorklistProcessingMode::
              kTrackNewlyDiscoveredObjects>();
    }

    while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
      VisitEphemeron(ephemeron.key, ephemeron.value);

      if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
        key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
      }
    }

    if (ephemeron_marking_.newly_discovered_overflowed) {
      // If newly_discovered was overflowed just visit all ephemerons in
      // next_ephemerons.
      weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
        if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
            non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
          marking_worklist()->Push(ephemeron.value);
        }
      });
    } else {
      // This is the good case: newly_discovered stores all discovered
      // objects. Use key_to_values to see if discovered objects keep more
      // objects alive due to ephemeron semantics.
      for (HeapObject* object : ephemeron_marking_.newly_discovered) {
        auto range = key_to_values.equal_range(object);
        for (auto it = range.first; it != range.second; ++it) {
          HeapObject* value = it->second;
          MarkObject(object, value);
        }
      }
    }

    // Do NOT drain the marking worklist here, otherwise the current checks
    // for work_to_do are not sufficient for determining if another iteration
    // is necessary.
    work_to_do = !marking_worklist()->IsEmpty() ||
                 !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
    CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
  }

  ResetNewlyDiscovered();
  ephemeron_marking_.newly_discovered.shrink_to_fit();

  CHECK(marking_worklist()->IsEmpty());
}
void MarkCompactCollector::PerformWrapperTracing() {
  if (heap_->local_embedder_heap_tracer()->InUse()) {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
    {
      LocalEmbedderHeapTracer::ProcessingScope scope(
          heap_->local_embedder_heap_tracer());
      HeapObject* object;
      while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
        scope.TracePossibleWrapper(JSObject::cast(object));
      }
    }
    heap_->local_embedder_heap_tracer()->Trace(
        std::numeric_limits<double>::infinity());
  }
}

void MarkCompactCollector::ProcessMarkingWorklist() {
  ProcessMarkingWorklistInternal<
      MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
}

template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
void MarkCompactCollector::ProcessMarkingWorklistInternal() {
  HeapObject* object;
  MarkCompactMarkingVisitor visitor(this, marking_state());
  while ((object = marking_worklist()->Pop()) != nullptr) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(!(marking_state()->IsWhite(object)));
    marking_state()->GreyToBlack(object);
    if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
                    kTrackNewlyDiscoveredObjects) {
      AddNewlyDiscovered(object);
    }
    Map map = object->map();
    MarkObject(object, map);
    visitor.Visit(map, object);
  }
  DCHECK(marking_worklist()->IsBailoutEmpty());
}
bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
  if (marking_state()->IsBlackOrGrey(key)) {
    if (marking_state()->WhiteToGrey(value)) {
      marking_worklist()->Push(value);
      return true;
    }
  } else if (marking_state()->IsWhite(value)) {
    weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
  }

  return false;
}

void MarkCompactCollector::ProcessEphemeronMarking() {
  DCHECK(marking_worklist()->IsEmpty());

  // Incremental marking might leave ephemerons in the main task's local
  // buffer, flush it into the global pool.
  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);

  ProcessEphemeronsUntilFixpoint();

  CHECK(marking_worklist()->IsEmpty());
  CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
}

void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
       !it.done(); it.Advance()) {
    if (it.frame()->type() == StackFrame::INTERPRETED) {
      return;
    }
    if (it.frame()->type() == StackFrame::OPTIMIZED) {
      Code code = it.frame()->LookupCode();
      if (!code->CanDeoptAt(it.frame()->pc())) {
        Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
      }
      return;
    }
  }
}
void MarkCompactCollector::RecordObjectStats() {
  if (V8_UNLIKELY(FLAG_gc_stats)) {
    heap()->CreateObjectStats();
    ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
                                   heap()->dead_object_stats_);
    collector.Collect();
    if (V8_UNLIKELY(FLAG_gc_stats &
                    v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
      std::stringstream live, dead;
      heap()->live_object_stats_->Dump(live);
      heap()->dead_object_stats_->Dump(dead);
      TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
                           "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
                           "live", TRACE_STR_COPY(live.str().c_str()), "dead",
                           TRACE_STR_COPY(dead.str().c_str()));
    }
    if (FLAG_trace_gc_object_stats) {
      heap()->live_object_stats_->PrintJSON("live");
      heap()->dead_object_stats_->PrintJSON("dead");
    }
    heap()->live_object_stats_->CheckpointObjectStats();
    heap()->dead_object_stats_->ClearObjectStats();
  }
}
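// MarkLiveObjects builds the full marking closure: roots, embedder (wrapper)
// tracing, ephemerons, and weak global handles, finishing any concurrent
// marking work along the way.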
void MarkCompactCollector::MarkLiveObjects() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
  // The recursive GC marker detects when it is nearing stack overflow,
  // and switches to a different marking system.  JS interrupts interfere
  // with the C stack limit check.
  PostponeInterruptsScope postpone(isolate());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
    IncrementalMarking* incremental_marking = heap_->incremental_marking();
    if (was_marked_incrementally_) {
      incremental_marking->Finalize();
    } else {
      CHECK(incremental_marking->IsStopped());
    }
  }

#ifdef DEBUG
  DCHECK(state_ == PREPARE_GC);
  state_ = MARK_LIVE_OBJECTS;
#endif

  heap_->local_embedder_heap_tracer()->EnterFinalPause();

  RootMarkingVisitor root_visitor(this);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
    CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
    MarkRoots(&root_visitor, &custom_root_body_visitor);
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
    if (FLAG_parallel_marking) {
      heap_->concurrent_marking()->RescheduleTasksIfNeeded();
    }
    ProcessMarkingWorklist();

    FinishConcurrentMarking(
        ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
    ProcessMarkingWorklist();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);

    DCHECK(marking_worklist()->IsEmpty());

    // Mark objects reachable through the embedder heap. This phase is
    // opportunistic as it may not discover graphs that are only reachable
    // through ephemerons.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
      do {
        // PerformWrapperTracing() also empties the work items collected by
        // concurrent markers. As a result this call needs to happen at least
        // once.
        PerformWrapperTracing();
        ProcessMarkingWorklist();
      } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
               !marking_worklist()->IsEmbedderEmpty());
      DCHECK(marking_worklist()->IsEmbedderEmpty());
      DCHECK(marking_worklist()->IsEmpty());
    }

    // Mark objects reachable only due to ephemeron semantics.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
      ProcessEphemeronMarking();
      DCHECK(marking_worklist()->IsEmpty());
    }

    // Objects pointed to only by weak global handles cannot be immediately
    // reclaimed. Instead, we have to mark them as pending and mark objects
    // reachable from them.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
      heap()->isolate()->global_handles()->IdentifyWeakHandles(
          &IsUnmarkedHeapObject);
      ProcessMarkingWorklist();
    }

    // Process finalizers, effectively keeping them alive until the next
    // garbage collection.
    {
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
      heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
          &root_visitor);
      ProcessMarkingWorklist();
    }

    // Repeat ephemeron processing from the newly marked objects.
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
      ProcessEphemeronMarking();
      DCHECK(marking_worklist()->IsEmbedderEmpty());
      DCHECK(marking_worklist()->IsEmpty());
    }

    heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
        &IsUnmarkedHeapObject);
  }

  if (was_marked_incrementally_) {
    heap()->incremental_marking()->Deactivate();
  }
}
void MarkCompactCollector::ClearNonLiveReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);

    // Prune the string table removing all strings only pointed to by the
    // string table.
    StringTable string_table = heap()->string_table();
    InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
    string_table->IterateElements(&internalized_visitor);
    string_table->ElementsRemoved(internalized_visitor.PointersRemoved());

    ExternalStringTableCleaner external_visitor(heap());
    heap()->external_string_table_.IterateAll(&external_visitor);
    heap()->external_string_table_.CleanUpAll();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
    // Process the weak references.
    MarkCompactWeakObjectRetainer mark_compact_object_retainer(
        non_atomic_marking_state());
    heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
    // ClearFullMapTransitions must be called before weak references are
    // cleared.
    ClearFullMapTransitions();
  }
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
    ClearWeakReferences();
    ClearWeakCollections();
    ClearJSWeakCells();
  }

  MarkDependentCodeForDeoptimization();

  DCHECK(weak_objects_.transition_arrays.IsEmpty());
  DCHECK(weak_objects_.weak_references.IsEmpty());
  DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
  DCHECK(weak_objects_.js_weak_cells.IsEmpty());
}
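// Code that embeds now-dead objects is marked for deoptimization here; the
// actual deoptimization happens in Finish().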
void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
  std::pair<HeapObject*, Code> weak_object_in_code;
  while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
                                                &weak_object_in_code)) {
    HeapObject* object = weak_object_in_code.first;
    Code code = weak_object_in_code.second;
    if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
        !code->embedded_objects_cleared()) {
      if (!code->marked_for_deoptimization()) {
        code->SetMarkedForDeoptimization("weak objects");
        have_code_to_deoptimize_ = true;
      }
      code->ClearEmbeddedObjects(heap_);
      DCHECK(code->embedded_objects_cleared());
    }
  }
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
  DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
  Object* potential_parent = dead_target->constructor_or_backpointer();
  if (potential_parent->IsMap()) {
    Map parent = Map::cast(potential_parent);
    DisallowHeapAllocation no_gc_obviously;
    if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
        TransitionsAccessor(isolate(), parent, &no_gc_obviously)
            .HasSimpleTransitionTo(dead_target)) {
      ClearPotentialSimpleMapTransition(parent, dead_target);
    }
  }
}
void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
                                                             Map dead_target) {
  DCHECK(!map->is_prototype_map());
  DCHECK(!dead_target->is_prototype_map());
  DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
  // Take ownership of the descriptor array.
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  DescriptorArray* descriptors = map->instance_descriptors();
  if (descriptors == dead_target->instance_descriptors() &&
      number_of_own_descriptors > 0) {
    TrimDescriptorArray(map, descriptors);
    DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
  }
}
void MarkCompactCollector::ClearFullMapTransitions() {
  TransitionArray* array;
  while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
    int num_transitions = array->number_of_entries();
    if (num_transitions > 0) {
      Map map;
      // The array might contain "undefined" elements because it's not yet
      // filled. Allow it.
      if (array->GetTargetIfExists(0, isolate(), &map)) {
        DCHECK(!map.is_null());  // Weak pointers aren't cleared yet.
        Map parent = Map::cast(map->constructor_or_backpointer());
        bool parent_is_alive =
            non_atomic_marking_state()->IsBlackOrGrey(parent);
        DescriptorArray* descriptors =
            parent_is_alive ? parent->instance_descriptors() : nullptr;
        bool descriptors_owner_died =
            CompactTransitionArray(parent, array, descriptors);
        if (descriptors_owner_died) {
          TrimDescriptorArray(parent, descriptors);
        }
      }
    }
  }
}
bool MarkCompactCollector::CompactTransitionArray(
    Map map, TransitionArray* transitions, DescriptorArray* descriptors) {
  DCHECK(!map->is_prototype_map());
  int num_transitions = transitions->number_of_entries();
  bool descriptors_owner_died = false;
  int transition_index = 0;
  // Compact all live transitions to the left.
  for (int i = 0; i < num_transitions; ++i) {
    Map target = transitions->GetTarget(i);
    DCHECK_EQ(target->constructor_or_backpointer(), map);
    if (non_atomic_marking_state()->IsWhite(target)) {
      if (descriptors != nullptr &&
          target->instance_descriptors() == descriptors) {
        DCHECK(!target->is_prototype_map());
        descriptors_owner_died = true;
      }
    } else {
      if (i != transition_index) {
        Name key = transitions->GetKey(i);
        transitions->SetKey(transition_index, key);
        HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
        RecordSlot(transitions, key_slot, key);
        MaybeObject raw_target = transitions->GetRawTarget(i);
        transitions->SetRawTarget(transition_index, raw_target);
        HeapObjectSlot target_slot =
            transitions->GetTargetSlot(transition_index);
        RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
      }
      transition_index++;
    }
  }
  // If there are no transitions to be cleared, return.
  if (transition_index == num_transitions) {
    DCHECK(!descriptors_owner_died);
    return false;
  }
  // Note that we never eliminate a transition array, though we might
  // right-trim it such that number_of_transitions() == 0.
  int trim = transitions->Capacity() - transition_index;
  if (trim > 0) {
    heap_->RightTrimWeakFixedArray(transitions,
                                   trim * TransitionArray::kEntrySize);
    transitions->SetNumberOfTransitions(transition_index);
  }
  return descriptors_owner_died;
}
void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray* array,
                                                    int descriptors_to_trim) {
  int old_nof_all_descriptors = array->number_of_all_descriptors();
  int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
  DCHECK_LT(0, descriptors_to_trim);
  DCHECK_LE(0, new_nof_all_descriptors);
  Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
  Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
  RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
                                         start, end,
                                         SlotSet::PREFREE_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
                                         start, end,
                                         SlotSet::PREFREE_EMPTY_BUCKETS);
  heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
                               ClearRecordedSlots::kNo);
  array->set_number_of_all_descriptors(new_nof_all_descriptors);
}
void MarkCompactCollector::TrimDescriptorArray(Map map,
                                               DescriptorArray* descriptors) {
  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
  if (number_of_own_descriptors == 0) {
    DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
    return;
  }
  int to_trim =
      descriptors->number_of_all_descriptors() - number_of_own_descriptors;
  if (to_trim > 0) {
    descriptors->set_number_of_descriptors(number_of_own_descriptors);
    RightTrimDescriptorArray(descriptors, to_trim);

    TrimEnumCache(map, descriptors);
    descriptors->Sort();
  }
  if (FLAG_unbox_double_fields) {
    LayoutDescriptor layout_descriptor = map->layout_descriptor();
    layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
                                                number_of_own_descriptors);
    SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
  }
  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
  map->set_owns_descriptors(true);
}
void MarkCompactCollector::TrimEnumCache(Map map,
                                         DescriptorArray* descriptors) {
  int live_enum = map->EnumLength();
  if (live_enum == kInvalidEnumCacheSentinel) {
    live_enum = map->NumberOfEnumerableProperties();
  }
  if (live_enum == 0) return descriptors->ClearEnumCache();
  EnumCache* enum_cache = descriptors->enum_cache();

  FixedArray keys = enum_cache->keys();
  int to_trim = keys->length() - live_enum;
  if (to_trim <= 0) return;
  heap_->RightTrimFixedArray(keys, to_trim);

  FixedArray indices = enum_cache->indices();
  to_trim = indices->length() - live_enum;
  if (to_trim <= 0) return;
  heap_->RightTrimFixedArray(indices, to_trim);
}
void MarkCompactCollector::ClearWeakCollections() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
  EphemeronHashTable table;

  while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
    for (int i = 0; i < table->Capacity(); i++) {
      HeapObject* key = HeapObject::cast(table->KeyAt(i));
#ifdef VERIFY_HEAP
      Object* value = table->ValueAt(i);

      if (value->IsHeapObject()) {
        CHECK_IMPLIES(
            non_atomic_marking_state()->IsBlackOrGrey(key),
            non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
      }
#endif
      if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
        table->RemoveEntry(i);
      }
    }
  }
}
void MarkCompactCollector::ClearWeakReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
  std::pair<HeapObject*, HeapObjectSlot> slot;
  HeapObjectReference cleared_weak_ref =
      HeapObjectReference::ClearedValue(isolate());
  while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
    HeapObject* value;
    HeapObjectSlot location = slot.second;
    if ((*location)->GetHeapObjectIfWeak(&value)) {
      DCHECK(!value->IsCell());
      if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
        // The value of the weak reference is alive.
        RecordSlot(slot.first, location, value);
      } else {
        if (value->IsMap()) {
          // The map is non-live.
          ClearPotentialSimpleMapTransition(Map::cast(value));
        }
        location.store(cleared_weak_ref);
      }
    }
  }
}
void MarkCompactCollector::ClearJSWeakCells() {
  if (!FLAG_harmony_weak_refs) {
    return;
  }
  JSWeakCell* weak_cell;
  while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
    // We do not insert cleared weak cells into the list, so the value cannot
    // be a Smi here.
    HeapObject* target = HeapObject::cast(weak_cell->target());
    if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
      // The value of the JSWeakCell is dead.
      JSWeakFactory* weak_factory = JSWeakFactory::cast(weak_cell->factory());
      if (!weak_factory->scheduled_for_cleanup()) {
        heap()->AddDirtyJSWeakFactory(
            weak_factory,
            [](HeapObject* object, ObjectSlot slot, Object* target) {
              if (target->IsHeapObject()) {
                RecordSlot(object, slot, HeapObject::cast(target));
              }
            });
      }
      // We're modifying the pointers in JSWeakCell and JSWeakFactory during
      // GC; thus we need to record the slots it writes. The normal write
      // barrier is not enough, since it's disabled before GC.
      weak_cell->Nullify(
          isolate(), [](HeapObject* object, ObjectSlot slot, Object* target) {
            if (target->IsHeapObject()) {
              RecordSlot(object, slot, HeapObject::cast(target));
            }
          });
      DCHECK(weak_factory->NeedsCleanup());
      DCHECK(weak_factory->scheduled_for_cleanup());
    } else {
      // The value of the JSWeakCell is alive.
      ObjectSlot slot =
          HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
      RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
    }
  }
}
void MarkCompactCollector::AbortWeakObjects() {
  weak_objects_.transition_arrays.Clear();
  weak_objects_.ephemeron_hash_tables.Clear();
  weak_objects_.current_ephemerons.Clear();
  weak_objects_.next_ephemerons.Clear();
  weak_objects_.discovered_ephemerons.Clear();
  weak_objects_.weak_references.Clear();
  weak_objects_.weak_objects_in_code.Clear();
  weak_objects_.js_weak_cells.Clear();
}

bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
  return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
}
void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
                                           HeapObject* target) {
  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
  Page* source_page = Page::FromAddress(host.ptr());
  if (target_page->IsEvacuationCandidate() &&
      (rinfo->host().is_null() ||
       !source_page->ShouldSkipEvacuationSlotRecording())) {
    RelocInfo::Mode rmode = rinfo->rmode();
    Address addr = rinfo->pc();
    SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
    if (rinfo->IsInConstantPool()) {
      addr = rinfo->constant_pool_entry_address();
      if (RelocInfo::IsCodeTargetMode(rmode)) {
        slot_type = CODE_ENTRY_SLOT;
      } else {
        DCHECK(RelocInfo::IsEmbeddedObject(rmode));
        slot_type = OBJECT_SLOT;
      }
    }
    RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, host.ptr(), slot_type,
                                           addr);
  }
}
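// Slot-updating helpers: once evacuation has installed forwarding addresses,
// these rewrite strong and weak slots to point at the moved objects.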
template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(
    MaybeObjectSlot slot, MaybeObject old, HeapObject* heap_obj,
    HeapObjectReferenceType reference_type) {
  MapWord map_word = heap_obj->map_word();
  if (map_word.IsForwardingAddress()) {
    DCHECK(Heap::InFromSpace(heap_obj) ||
           MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
           Page::FromAddress(heap_obj->address())
               ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
    MaybeObject target =
        reference_type == HeapObjectReferenceType::WEAK
            ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
            : HeapObjectReference::Strong(map_word.ToForwardingAddress());
    if (access_mode == AccessMode::NON_ATOMIC) {
      slot.store(target);
    } else {
      slot.Release_CompareAndSwap(old, target);
    }
    DCHECK(!Heap::InFromSpace(target));
    DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
  } else {
    DCHECK(heap_obj->map()->IsMap());
  }
  // OLD_TO_OLD slots are always removed after updating.
  return REMOVE_SLOT;
}

template <AccessMode access_mode>
static inline SlotCallbackResult UpdateSlot(MaybeObjectSlot slot) {
  MaybeObject obj = slot.Relaxed_Load();
  HeapObject* heap_obj;
  if (obj->GetHeapObjectIfWeak(&heap_obj)) {
    UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
  } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
    return UpdateSlot<access_mode>(slot, obj, heap_obj,
                                   HeapObjectReferenceType::STRONG);
  }
  return REMOVE_SLOT;
}

template <AccessMode access_mode>
static inline SlotCallbackResult UpdateStrongSlot(MaybeObjectSlot maybe_slot) {
  DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
  ObjectSlot slot(maybe_slot);
  Object* obj = slot.Relaxed_Load();
  if (obj->IsHeapObject()) {
    HeapObject* heap_obj = HeapObject::cast(obj);
    return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
                                   heap_obj, HeapObjectReferenceType::STRONG);
  }
  return REMOVE_SLOT;
}
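// Visitor that updates root pointers and object bodies after evacuation.
// Strong slots are checked to carry no weak tag; typed slots embedded in code
// objects are routed through UpdateTypedSlotHelper.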
class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
 public:
  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}

  void VisitPointer(HeapObject* host, ObjectSlot p) override {
    UpdateStrongSlotInternal(p);
  }

  void VisitPointer(HeapObject* host, MaybeObjectSlot p) override {
    UpdateSlotInternal(p);
  }

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    for (ObjectSlot p = start; p < end; ++p) {
      UpdateStrongSlotInternal(p);
    }
  }

  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                     MaybeObjectSlot end) final {
    for (MaybeObjectSlot p = start; p < end; ++p) {
      UpdateSlotInternal(p);
    }
  }

  void VisitRootPointer(Root root, const char* description,
                        ObjectSlot p) override {
    UpdateStrongSlotInternal(p);
  }

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) override {
    for (ObjectSlot p = start; p < end; ++p) UpdateStrongSlotInternal(p);
  }

  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
    UpdateTypedSlotHelper::UpdateEmbeddedPointer(
        heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
  }

  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
    UpdateTypedSlotHelper::UpdateCodeTarget(
        rinfo, UpdateStrongMaybeObjectSlotInternal);
  }

 private:
  static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
      MaybeObjectSlot slot) {
    DCHECK(!(*slot)->IsWeakOrCleared());
    return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
  }

  static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
    DCHECK(!HasWeakHeapObjectTag(*slot));
    return UpdateStrongSlot<AccessMode::NON_ATOMIC>(MaybeObjectSlot(slot));
  }

  static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
    return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
  }

  Heap* heap_;
};
static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
                                                        ObjectSlot p) {
  MapWord map_word = HeapObject::cast(*p)->map_word();

  if (map_word.IsForwardingAddress()) {
    String new_string = String::cast(map_word.ToForwardingAddress());

    if (new_string->IsExternalString()) {
      MemoryChunk::MoveExternalBackingStoreBytes(
          ExternalBackingStoreType::kExternalString,
          Page::FromAddress(reinterpret_cast<Address>(*p)),
          Page::FromHeapObject(new_string),
          ExternalString::cast(new_string)->ExternalPayloadSize());
    }
    return new_string;
  }

  return String::cast(*p);
}
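// Evacuation prologue/epilogue: the prologue snapshots the set of new-space
// pages to process and takes over the evacuation candidates selected during
// marking; the epilogue resets the new-space age mark, frees unmarked large
// objects, and releases the evacuated candidate pages.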
void MarkCompactCollector::EvacuatePrologue() {
  // New space.
  NewSpace* new_space = heap()->new_space();
  // Append the list of new space pages to be processed.
  for (Page* p :
       PageRange(new_space->first_allocatable_address(), new_space->top())) {
    new_space_evacuation_pages_.push_back(p);
  }
  new_space->Flip();
  new_space->ResetLinearAllocationArea();

  heap()->new_lo_space()->Flip();

  // Old space.
  DCHECK(old_space_evacuation_pages_.empty());
  old_space_evacuation_pages_ = std::move(evacuation_candidates_);
  evacuation_candidates_.clear();
  DCHECK(evacuation_candidates_.empty());
}

void MarkCompactCollector::EvacuateEpilogue() {
  aborted_evacuation_candidates_.clear();
  // New space.
  heap()->new_space()->set_age_mark(heap()->new_space()->top());
  // Deallocate unmarked large objects.
  heap()->lo_space()->FreeUnmarkedObjects();
  heap()->code_lo_space()->FreeUnmarkedObjects();
  heap()->new_lo_space()->FreeUnmarkedObjects();
  // Old space. Deallocate evacuated candidate pages.
  ReleaseEvacuationCandidates();
  // Give pages that are queued to be freed back to the OS.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
#ifdef DEBUG
  // Old-to-old slot sets must be empty after evacuation.
  for (Page* p : *heap()->old_space()) {
    DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
    DCHECK_NULL(p->invalidated_slots());
  }
#endif
}
class Evacuator : public Malloced {
 public:
  enum EvacuationMode {
    kObjectsNewToOld,
    kPageNewToOld,
    kObjectsOldToOld,
    kPageNewToNew,
  };

  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
      return kPageNewToOld;
    if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
      return kPageNewToNew;
    if (chunk->InNewSpace()) return kObjectsNewToOld;
    return kObjectsOldToOld;
  }

  // New-space pages with more live bytes than this threshold qualify for page
  // promotion.
  static intptr_t NewSpacePageEvacuationThreshold() {
    if (FLAG_page_promotion)
      return FLAG_page_promotion_threshold *
             MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
    return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
  }

  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
      : heap_(heap),
        local_allocator_(heap_),
        local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
        new_space_visitor_(heap_, &local_allocator_, record_visitor,
                           &local_pretenuring_feedback_),
        new_to_new_page_visitor_(heap_, record_visitor,
                                 &local_pretenuring_feedback_),
        new_to_old_page_visitor_(heap_, record_visitor,
                                 &local_pretenuring_feedback_),
        old_space_visitor_(heap_, &local_allocator_, record_visitor),
        duration_(0.0),
        bytes_compacted_(0) {}

  virtual ~Evacuator() = default;

  void EvacuatePage(MemoryChunk* chunk);

  void AddObserver(MigrationObserver* observer) {
    new_space_visitor_.AddObserver(observer);
    old_space_visitor_.AddObserver(observer);
  }

  // Merge back locally cached info sequentially. Needs to be called from the
  // main thread.
  inline void Finalize();

  virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;

 protected:
  static const int kInitialLocalPretenuringFeedbackCapacity = 256;

  // |saved_live_bytes| returns the live bytes of the page that was processed.
  virtual void RawEvacuatePage(MemoryChunk* chunk,
                               intptr_t* saved_live_bytes) = 0;

  inline Heap* heap() { return heap_; }

  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
    duration_ += duration;
    bytes_compacted_ += bytes_compacted;
  }

  Heap* heap_;

  // Locally cached collector data and visitors for the individual spaces.
  LocalAllocator local_allocator_;
  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
  EvacuateNewSpaceVisitor new_space_visitor_;
  EvacuateNewSpacePageVisitor<NEW_TO_NEW> new_to_new_page_visitor_;
  EvacuateNewSpacePageVisitor<NEW_TO_OLD> new_to_old_page_visitor_;
  EvacuateOldSpaceVisitor old_space_visitor_;

  // Book keeping info.
  double duration_;
  intptr_t bytes_compacted_;
};
void Evacuator::EvacuatePage(MemoryChunk* chunk) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
  DCHECK(chunk->SweepingDone());
  intptr_t saved_live_bytes = 0;
  double evacuation_time = 0.0;
  {
    AlwaysAllocateScope always_allocate(heap()->isolate());
    TimedScope timed_scope(&evacuation_time);
    RawEvacuatePage(chunk, &saved_live_bytes);
  }
  ReportCompactionProgress(evacuation_time, saved_live_bytes);
  if (FLAG_trace_evacuation) {
    PrintIsolate(heap()->isolate(),
                 "evacuation[%p]: page=%p new_space=%d "
                 "page_evacuation=%d executable=%d contains_age_mark=%d "
                 "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
                 static_cast<void*>(this), static_cast<void*>(chunk),
                 chunk->InNewSpace(),
                 chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
                     chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
                 chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
                 chunk->Contains(heap()->new_space()->age_mark()),
                 saved_live_bytes, evacuation_time,
                 chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
  }
}

void Evacuator::Finalize() {
  local_allocator_.Finalize();
  heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
                                       new_to_old_page_visitor_.moved_bytes());
  heap()->IncrementSemiSpaceCopiedObjectSize(
      new_space_visitor_.semispace_copied_size() +
      new_to_new_page_visitor_.moved_bytes());
  heap()->IncrementYoungSurvivorsCounter(
      new_space_visitor_.promoted_size() +
      new_space_visitor_.semispace_copied_size() +
      new_to_old_page_visitor_.moved_bytes() +
      new_to_new_page_visitor_.moved_bytes());
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
}
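// The full collector's evacuator dispatches each page to the visitor that
// matches its evacuation mode. Old-to-old evacuation of a page can fail, in
// which case the remaining objects stay put and the page is reported as an
// aborted evacuation candidate for post-processing on the main thread.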
class FullEvacuator : public Evacuator {
 public:
  FullEvacuator(MarkCompactCollector* collector,
                RecordMigratedSlotVisitor* record_visitor)
      : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

  GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
    return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
  }

 protected:
  void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;

  MarkCompactCollector* collector_;
};

void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
  const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "FullEvacuator::RawEvacuatePage", "evacuation_mode",
               evacuation_mode);
  MarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(chunk);
  HeapObject* failed_object = nullptr;
  switch (evacuation_mode) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          chunk, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          chunk, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitBlackObjectsNoFail(
          chunk, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      break;
    case kObjectsOldToOld: {
      const bool success = LiveObjectVisitor::VisitBlackObjects(
          chunk, marking_state, &old_space_visitor_,
          LiveObjectVisitor::kClearMarkbits, &failed_object);
      if (!success) {
        // Aborted compaction page. Actual processing happens on the main
        // thread for simplicity reasons.
        collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
      }
      break;
    }
  }
}
class PageEvacuationTask : public ItemParallelJob::Task {
 public:
  PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
      : ItemParallelJob::Task(isolate),
        evacuator_(evacuator),
        tracer_(isolate->heap()->tracer()) {}

  void RunInParallel() override {
    TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
    EvacuationItem* item = nullptr;
    while ((item = GetItem<EvacuationItem>()) != nullptr) {
      evacuator_->EvacuatePage(item->chunk());
      item->MarkFinished();
    }
  }

 private:
  Evacuator* evacuator_;
  GCTracer* tracer_;
};
template <class Evacuator, class Collector>
void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
    Collector* collector, ItemParallelJob* job,
    RecordMigratedSlotVisitor* record_visitor,
    MigrationObserver* migration_observer, const intptr_t live_bytes) {
  // Used for trace summary.
  double compaction_speed = 0;
  if (FLAG_trace_evacuation) {
    compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
  }

  const bool profiling = isolate()->LogObjectRelocation();
  ProfilingMigrationObserver profiling_observer(heap());

  const int wanted_num_tasks =
      NumberOfParallelCompactionTasks(job->NumberOfItems());
  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i] = new Evacuator(collector, record_visitor);
    if (profiling) evacuators[i]->AddObserver(&profiling_observer);
    if (migration_observer != nullptr)
      evacuators[i]->AddObserver(migration_observer);
    job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
  }
  job->Run(isolate()->async_counters());
  for (int i = 0; i < wanted_num_tasks; i++) {
    evacuators[i]->Finalize();
    delete evacuators[i];
  }
  delete[] evacuators;

  if (FLAG_trace_evacuation) {
    PrintIsolate(isolate(),
                 "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
                 "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
                 " compaction_speed=%.f\n",
                 isolate()->time_millis_since_init(),
                 FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
                 wanted_num_tasks, job->NumberOfTasks(),
                 V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
                 live_bytes, compaction_speed);
  }
}

bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
  const bool reduce_memory = heap()->ShouldReduceMemory();
  const Address age_mark = heap()->new_space()->age_mark();
  return !reduce_memory && !p->NeverEvacuate() &&
         (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
         !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
}
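// Collects evacuation items for old-space candidates, for new-space pages
// (deciding per page between copying individual objects and promoting the
// whole page), and for live new-space large objects, then runs the evacuation
// job in parallel using FullEvacuator tasks.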
void MarkCompactCollector::EvacuatePagesInParallel() {
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
  intptr_t live_bytes = 0;

  for (Page* page : old_space_evacuation_pages_) {
    live_bytes += non_atomic_marking_state()->live_bytes(page);
    evacuation_job.AddItem(new EvacuationItem(page));
  }

  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
        DCHECK_EQ(heap()->old_space(), page->owner());
        // The move added page->allocated_bytes to the old space, but we are
        // going to sweep the page and add page->live_byte_count.
        heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
                                                    page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_job.AddItem(new EvacuationItem(page));
  }

  // Promote young generation large objects.
  LargePage* current = heap()->new_lo_space()->first_page();
  IncrementalMarking::NonAtomicMarkingState* marking_state =
      heap()->incremental_marking()->non_atomic_marking_state();
  while (current) {
    LargePage* next_current = current->next_page();
    HeapObject* object = current->GetObject();
    DCHECK(!marking_state->IsGrey(object));
    if (marking_state->IsBlack(object)) {
      heap_->lo_space()->PromoteNewLargeObject(current);
      current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
      evacuation_job.AddItem(new EvacuationItem(current));
    }
    current = next_current;
  }

  if (evacuation_job.NumberOfItems() == 0) return;

  RecordMigratedSlotVisitor record_visitor(this);
  CreateAndExecuteEvacuationTasks<FullEvacuator>(
      this, &evacuation_job, &record_visitor, nullptr, live_bytes);
  PostProcessEvacuationCandidates();
}
class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object* RetainAs(Object* object) override {
    if (object->IsHeapObject()) {
      HeapObject* heap_object = HeapObject::cast(object);
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        return map_word.ToForwardingAddress();
      }
    }
    return object;
  }
};

void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
  RecordMigratedSlotVisitor visitor(this);
  LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
                                             &visitor,
                                             LiveObjectVisitor::kKeepMarking);
}
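// LiveObjectVisitor iterates the marking bitmap of a chunk and applies the
// given visitor to every black (respectively grey) object. VisitBlackObjects
// may fail on an individual object; in that case the markbits of the already
// visited prefix of the page are cleared and the failing object is reported
// back to the caller.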
template <class Visitor, typename MarkingState>
bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
                                          MarkingState* marking_state,
                                          Visitor* visitor,
                                          IterationMode iteration_mode,
                                          HeapObject** failed_object) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "LiveObjectVisitor::VisitBlackObjects");
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    if (!visitor->Visit(object, object_and_size.second)) {
      if (iteration_mode == kClearMarkbits) {
        marking_state->bitmap(chunk)->ClearRange(
            chunk->AddressToMarkbitIndex(chunk->area_start()),
            chunk->AddressToMarkbitIndex(object->address()));
        *failed_object = object;
      }
      return false;
    }
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
  return true;
}

template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
                                                MarkingState* marking_state,
                                                Visitor* visitor,
                                                IterationMode iteration_mode) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "LiveObjectVisitor::VisitBlackObjectsNoFail");
  for (auto object_and_size :
       LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsBlack(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}

template <class Visitor, typename MarkingState>
void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
                                               MarkingState* marking_state,
                                               Visitor* visitor,
                                               IterationMode iteration_mode) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "LiveObjectVisitor::VisitGreyObjectsNoFail");
  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(marking_state->IsGrey(object));
    const bool success = visitor->Visit(object, object_and_size.second);
    USE(success);
    DCHECK(success);
  }
  if (iteration_mode == kClearMarkbits) {
    marking_state->ClearLiveness(chunk);
  }
}

template <typename MarkingState>
void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
                                           MarkingState* marking_state) {
  int new_live_size = 0;
  for (auto object_and_size :
       LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
    new_live_size += object_and_size.second;
  }
  marking_state->SetLiveBytes(chunk, new_live_size);
}
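// Top-level evacuation phase of the full collector: evacuate pages in
// parallel, update all pointers to moved objects, rebalance new space, and
// hand promoted or aborted pages over to the sweeper.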
void MarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
  base::MutexGuard guard(heap()->relocation_mutex());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
    EvacuationScope evacuation_scope(this);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  // Give pages that are queued to be freed back to the OS.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);

    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        sweeper()->AddPageForIterability(p);
      } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        DCHECK_EQ(OLD_SPACE, p->owner()->identity());
        sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
      }
    }
    new_space_evacuation_pages_.clear();

    for (Page* p : old_space_evacuation_pages_) {
      SkipList* list = p->skip_list();
      if (list != nullptr) list->Clear();
      if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
        sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
        p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
      }
    }
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }

#ifdef VERIFY_HEAP
  if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
    FullEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif
}
class UpdatingItem : public ItemParallelJob::Item {
 public:
  ~UpdatingItem() override = default;
  virtual void Process() = 0;
};

class PointersUpdatingTask : public ItemParallelJob::Task {
 public:
  PointersUpdatingTask(Isolate* isolate,
                       GCTracer::BackgroundScope::ScopeId scope)
      : ItemParallelJob::Task(isolate),
        tracer_(isolate->heap()->tracer()),
        scope_(scope) {}
  void RunInParallel() override {
    TRACE_BACKGROUND_GC(tracer_, scope_);
    UpdatingItem* item = nullptr;
    while ((item = GetItem<UpdatingItem>()) != nullptr) {
      item->Process();
      item->MarkFinished();
    }
  }

 private:
  GCTracer* tracer_;
  GCTracer::BackgroundScope::ScopeId scope_;
};
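// Pointer updating is organized as ItemParallelJob work items: to-space items
// revisit evacuated new-space objects, remembered-set items rewrite
// OLD_TO_NEW/OLD_TO_OLD slots, global-handles items update new-space roots,
// and array-buffer-tracker items fix up tracked JSArrayBuffers. A
// PointersUpdatingTask simply drains whatever items were added to the job.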
template <typename MarkingState>
class ToSpaceUpdatingItem : public UpdatingItem {
 public:
  explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
                               MarkingState* marking_state)
      : chunk_(chunk),
        start_(start),
        end_(end),
        marking_state_(marking_state) {}
  ~ToSpaceUpdatingItem() override = default;

  void Process() override {
    if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
      // New->new promoted pages contain garbage, so they require iteration
      // using markbits.
      ProcessVisitLive();
    } else {
      ProcessVisitAll();
    }
  }

 private:
  void ProcessVisitAll() {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "ToSpaceUpdatingItem::ProcessVisitAll");
    PointersUpdatingVisitor visitor(chunk_->heap());
    for (Address cur = start_; cur < end_;) {
      HeapObject* object = HeapObject::FromAddress(cur);
      Map map = object->map();
      int size = object->SizeFromMap(map);
      object->IterateBodyFast(map, size, &visitor);
      cur += size;
    }
  }

  void ProcessVisitLive() {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "ToSpaceUpdatingItem::ProcessVisitLive");
    PointersUpdatingVisitor visitor(chunk_->heap());
    for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
             chunk_, marking_state_->bitmap(chunk_))) {
      object_and_size.first->IterateBodyFast(&visitor);
    }
  }

  MemoryChunk* chunk_;
  Address start_;
  Address end_;
  MarkingState* marking_state_;
};
template <typename MarkingState>
class RememberedSetUpdatingItem : public UpdatingItem {
 public:
  explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
                                     MemoryChunk* chunk,
                                     RememberedSetUpdatingMode updating_mode)
      : heap_(heap),
        marking_state_(marking_state),
        chunk_(chunk),
        updating_mode_(updating_mode) {}
  ~RememberedSetUpdatingItem() override = default;

  void Process() override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "RememberedSetUpdatingItem::Process");
    base::MutexGuard guard(chunk_->mutex());
    UpdateUntypedPointers();
    UpdateTypedPointers();
  }

 private:
  inline SlotCallbackResult CheckAndUpdateOldToNewSlot(MaybeObjectSlot slot) {
    HeapObject* heap_object;
    if (!(*slot)->GetHeapObject(&heap_object)) {
      return REMOVE_SLOT;
    }
    if (Heap::InFromSpace(heap_object)) {
      MapWord map_word = heap_object->map_word();
      if (map_word.IsForwardingAddress()) {
        HeapObjectReference::Update(HeapObjectSlot(slot),
                                    map_word.ToForwardingAddress());
      }
      bool success = (*slot)->GetHeapObject(&heap_object);
      USE(success);
      DCHECK(success);
      // If the object was in from-space before and is in to-space after
      // executing the callback, the object is still live.
      if (Heap::InToSpace(heap_object)) {
        return KEEP_SLOT;
      }
    } else if (Heap::InToSpace(heap_object)) {
      // Slots can point to "to" space if the page has been moved, or if the
      // slot has been recorded multiple times in the remembered set. In case
      // the page has been moved, check markbits to determine liveness of the
      // slot; otherwise the slot can just be kept.
      if (Page::FromAddress(heap_object->address())
              ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
        // IsBlackOrGrey is required because objects are marked as grey for
        // the young generation collector while they are black for the full
        // MC.
        if (marking_state_->IsBlackOrGrey(heap_object)) {
          return KEEP_SLOT;
        }
        return REMOVE_SLOT;
      }
      return KEEP_SLOT;
    } else {
      DCHECK(!Heap::InNewSpace(heap_object));
    }
    return REMOVE_SLOT;
  }

  void UpdateUntypedPointers() {
    if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
      RememberedSet<OLD_TO_NEW>::Iterate(
          chunk_,
          [this](MaybeObjectSlot slot) {
            return CheckAndUpdateOldToNewSlot(slot);
          },
          SlotSet::PREFREE_EMPTY_BUCKETS);
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
      InvalidatedSlotsFilter filter(chunk_);
      RememberedSet<OLD_TO_OLD>::Iterate(
          chunk_,
          [&filter](MaybeObjectSlot slot) {
            if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
            return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
          },
          SlotSet::PREFREE_EMPTY_BUCKETS);
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        chunk_->invalidated_slots() != nullptr) {
#ifdef DEBUG
      for (auto object_size : *chunk_->invalidated_slots()) {
        HeapObject* object = object_size.first;
        int size = object_size.second;
        DCHECK_LE(object->SizeFromMap(object->map()), size);
      }
#endif
      // The invalidated slots are not needed after old-to-old slots were
      // processed.
      chunk_->ReleaseInvalidatedSlots();
    }
  }

  void UpdateTypedPointers() {
    if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
        nullptr) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      const auto check_and_update_old_to_new_slot_fn =
          [this](MaybeObjectSlot slot) {
            return CheckAndUpdateOldToNewSlot(slot);
          };
      RememberedSet<OLD_TO_NEW>::IterateTyped(
          chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
          });
    }
    if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
        (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
         nullptr)) {
      CHECK_NE(chunk_->owner(), heap_->map_space());
      RememberedSet<OLD_TO_OLD>::IterateTyped(
          chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
            // Using UpdateStrongSlot is OK here, because there are no weak
            // typed slots.
            return UpdateTypedSlotHelper::UpdateTypedSlot(
                heap_, slot_type, slot,
                UpdateStrongSlot<AccessMode::NON_ATOMIC>);
          });
    }
  }

  Heap* heap_;
  MarkingState* marking_state_;
  MemoryChunk* chunk_;
  RememberedSetUpdatingMode updating_mode_;
};
UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}

UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
    MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
      heap(), non_atomic_marking_state(), chunk, updating_mode);
}

class GlobalHandlesUpdatingItem : public UpdatingItem {
 public:
  GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
                            size_t start, size_t end)
      : heap_(heap),
        global_handles_(global_handles),
        start_(start),
        end_(end) {}
  ~GlobalHandlesUpdatingItem() override = default;

  void Process() override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "GlobalHandlesUpdatingItem::Process");
    PointersUpdatingVisitor updating_visitor(heap_);
    global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
  }

 private:
  Heap* heap_;
  GlobalHandles* global_handles_;
  size_t start_;
  size_t end_;
};
class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
 public:
  enum EvacuationState { kRegular, kAborted };

  explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
      : page_(page), state_(state) {}
  ~ArrayBufferTrackerUpdatingItem() override = default;

  void Process() override {
    TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
                 state_);
    switch (state_) {
      case EvacuationState::kRegular:
        ArrayBufferTracker::ProcessBuffers(
            page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
        break;
      case EvacuationState::kAborted:
        ArrayBufferTracker::ProcessBuffers(
            page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
        break;
    }
  }

 private:
  Page* const page_;
  const EvacuationState state_;
};
int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
    ItemParallelJob* job) {
  // Seed to-space pages.
  const Address space_start = heap()->new_space()->first_allocatable_address();
  const Address space_end = heap()->new_space()->top();
  int pages = 0;
  for (Page* page : PageRange(space_start, space_end)) {
    Address start =
        page->Contains(space_start) ? space_start : page->area_start();
    Address end = page->Contains(space_end) ? space_end : page->area_end();
    job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
    pages++;
  }
  if (pages == 0) return 0;
  return NumberOfParallelToSpacePointerUpdateTasks(pages);
}

template <typename IterateableSpace>
int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
    ItemParallelJob* job, IterateableSpace* space,
    RememberedSetUpdatingMode mode) {
  int pages = 0;
  for (MemoryChunk* chunk : *space) {
    const bool contains_old_to_old_slots =
        chunk->slot_set<OLD_TO_OLD>() != nullptr ||
        chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
    const bool contains_old_to_new_slots =
        chunk->slot_set<OLD_TO_NEW>() != nullptr ||
        chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
    const bool contains_invalidated_slots =
        chunk->invalidated_slots() != nullptr;
    if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
        !contains_invalidated_slots)
      continue;
    if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
        contains_invalidated_slots) {
      job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
      pages++;
    }
  }
  return pages;
}
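// The helpers below collect per-page updating items for JSArrayBuffers that
// are tracked on evacuated pages; pages whose evacuation was aborted use the
// mode that keeps buffers of objects that were not forwarded.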
int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  int pages = 0;
  for (Page* p : new_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
      if (p->local_tracker() == nullptr) continue;

      pages++;
      job->AddItem(new ArrayBufferTrackerUpdatingItem(
          p, ArrayBufferTrackerUpdatingItem::kRegular));
    }
  }
  return pages;
}

int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  int pages = 0;
  for (Page* p : old_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
        p->IsEvacuationCandidate()) {
      if (p->local_tracker() == nullptr) continue;

      pages++;
      job->AddItem(new ArrayBufferTrackerUpdatingItem(
          p, ArrayBufferTrackerUpdatingItem::kRegular));
    }
  }
  for (auto object_and_page : aborted_evacuation_candidates_) {
    Page* p = object_and_page.second;
    if (p->local_tracker() == nullptr) continue;

    pages++;
    job->AddItem(new ArrayBufferTrackerUpdatingItem(
        p, ArrayBufferTrackerUpdatingItem::kAborted));
  }
  return pages;
}
void MarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor(heap());

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
    ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);

    int remembered_set_pages = 0;
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
    const int remembered_set_tasks =
        remembered_set_pages == 0
            ? 0
            : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
                                                 old_to_new_slots_);
    const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
    const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
    for (int i = 0; i < num_tasks; i++) {
      updating_job.AddTask(new PointersUpdatingTask(
          isolate(),
          GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
    }
    updating_job.Run(isolate()->async_counters());
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
    ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);

    int array_buffer_pages = 0;
    array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
    array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);

    int remembered_set_pages = 0;
    remembered_set_pages += CollectRememberedSetUpdatingItems(
        &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
    const int remembered_set_tasks =
        remembered_set_pages == 0
            ? 0
            : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
                                                 old_to_new_slots_);
    const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
    if (num_tasks > 0) {
      for (int i = 0; i < num_tasks; i++) {
        updating_job.AddTask(new PointersUpdatingTask(
            isolate(),
            GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
      }
      updating_job.Run(isolate()->async_counters());
      heap()->array_buffer_collector()->FreeAllocations();
    }
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);

    heap_->UpdateReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);
  }
}
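// When old-space evacuation of a page fails part-way, the page is recorded
// here. PostProcessEvacuationCandidates() then marks it as aborted, removes
// the OLD_TO_NEW slots covering the already evacuated prefix, recomputes live
// bytes, and re-records slots for the objects that stay behind.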
void MarkCompactCollector::ReportAbortedEvacuationCandidate(
    HeapObject* failed_object, MemoryChunk* chunk) {
  base::MutexGuard guard(&mutex_);

  aborted_evacuation_candidates_.push_back(
      std::make_pair(failed_object, static_cast<Page*>(chunk)));
}

void MarkCompactCollector::PostProcessEvacuationCandidates() {
  for (auto object_and_page : aborted_evacuation_candidates_) {
    HeapObject* failed_object = object_and_page.first;
    Page* page = object_and_page.second;
    page->SetFlag(Page::COMPACTION_WAS_ABORTED);
    // Remove outdated slots.
    RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
                                           failed_object->address(),
                                           SlotSet::PREFREE_EMPTY_BUCKETS);
    RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
                                                failed_object->address());
    // Recompute live bytes.
    LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
    // Re-record slots.
    EvacuateRecordOnlyVisitor record_visitor(heap());
    LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
                                               &record_visitor,
                                               LiveObjectVisitor::kKeepMarking);
  }
  const int aborted_pages =
      static_cast<int>(aborted_evacuation_candidates_.size());
  int aborted_pages_verified = 0;
  for (Page* p : old_space_evacuation_pages_) {
    if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
      // After clearing the evacuation candidate flag the page is again in a
      // regular state.
      p->ClearEvacuationCandidate();
      aborted_pages_verified++;
    } else {
      DCHECK(p->IsEvacuationCandidate());
      DCHECK(p->SweepingDone());
      p->owner()->memory_chunk_list().Remove(p);
    }
  }
  DCHECK_EQ(aborted_pages_verified, aborted_pages);
  if (FLAG_trace_evacuation && (aborted_pages > 0)) {
    PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
                 isolate()->time_millis_since_init(), aborted_pages);
  }
}
void MarkCompactCollector::ReleaseEvacuationCandidates() {
  for (Page* p : old_space_evacuation_pages_) {
    if (!p->IsEvacuationCandidate()) continue;
    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
    non_atomic_marking_state()->SetLiveBytes(p, 0);
    CHECK(p->SweepingDone());
    space->ReleasePage(p);
  }
  old_space_evacuation_pages_.clear();
  compacting_ = false;
}
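// Prepares a paged space for sweeping: evacuation candidates are skipped,
// NEVER_ALLOCATE pages are swept eagerly (ignoring the free list), at most one
// completely unused page is kept, and every remaining page is handed to the
// concurrent sweeper.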
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
  space->ClearStats();

  int will_be_swept = 0;
  bool unused_page_present = false;

  // The loop needs to support removal of the current page.
  for (auto it = space->begin(); it != space->end();) {
    Page* p = *(it++);
    DCHECK(p->SweepingDone());

    if (p->IsEvacuationCandidate()) {
      // Will be processed in Evacuate.
      DCHECK(!evacuation_candidates_.empty());
      continue;
    }

    if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
      // We need to sweep the page to get it into an iterable state again.
      p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
      sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
                          Heap::ShouldZapGarbage()
                              ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
                              : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
      space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
      continue;
    }

    // One unused page is kept, all further are released before sweeping them.
    if (non_atomic_marking_state()->live_bytes(p) == 0) {
      if (unused_page_present) {
        if (FLAG_gc_verbose) {
          PrintIsolate(isolate(), "sweeping: released page: %p",
                       static_cast<void*>(p));
        }
        ArrayBufferTracker::FreeAll(p);
        space->memory_chunk_list().Remove(p);
        space->ReleasePage(p);
        continue;
      }
      unused_page_present = true;
    }

    sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
    will_be_swept++;
  }

  if (FLAG_gc_verbose) {
    PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
                 space->name(), will_be_swept);
  }
}

void MarkCompactCollector::StartSweepSpaces() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
#ifdef DEBUG
  state_ = SWEEP_SPACES;
#endif

  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_OLD);
    StartSweepSpace(heap()->old_space());
  }
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_CODE);
    StartSweepSpace(heap()->code_space());
  }
  {
    GCTracer::Scope sweep_scope(heap()->tracer(),
                                GCTracer::Scope::MC_SWEEP_MAP);
    StartSweepSpace(heap()->map_space());
  }
  sweeper()->StartSweeping();
}
void MarkCompactCollector::MarkingWorklist::PrintWorklist(
    const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
  std::map<InstanceType, int> count;
  int total_count = 0;
  worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
    ++total_count;
    count[obj->map()->instance_type()]++;
  });
  std::vector<std::pair<int, InstanceType>> rank;
  rank.reserve(count.size());
  for (const auto& i : count) {
    rank.emplace_back(i.second, i.first);
  }
  std::map<InstanceType, std::string> instance_type_name;
#define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
  INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
#undef INSTANCE_TYPE_NAME
  std::sort(rank.begin(), rank.end(),
            std::greater<std::pair<int, InstanceType>>());
  PrintF("Worklist %s: %d\n", worklist_name, total_count);
  for (auto i : rank) {
    PrintF("  [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
  }
}
#ifdef ENABLE_MINOR_MC

#ifdef VERIFY_HEAP

class YoungGenerationMarkingVerifier : public MarkingVerifier {
 public:
  explicit YoungGenerationMarkingVerifier(Heap* heap)
      : MarkingVerifier(heap),
        marking_state_(
            heap->minor_mark_compact_collector()->non_atomic_marking_state()) {
  }

  Bitmap* bitmap(const MemoryChunk* chunk) override {
    return marking_state_->bitmap(chunk);
  }

  bool IsMarked(HeapObject* object) override {
    return marking_state_->IsGrey(object);
  }

  bool IsBlackOrGrey(HeapObject* object) override {
    return marking_state_->IsBlackOrGrey(object);
  }

  void Run() override {
    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    VerifyMarking(heap_->new_space());
  }

 protected:
  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    for (ObjectSlot current = start; current < end; ++current) {
      DCHECK(!HasWeakHeapObjectTag(*current));
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        if (!Heap::InNewSpace(object)) return;
        CHECK(IsMarked(object));
      }
    }
  }

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject* object;
      if ((*current)->GetHeapObject(&object)) {
        if (!Heap::InNewSpace(object)) {
          continue;
        }
        CHECK(IsMarked(object));
      }
    }
  }

 private:
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
 public:
  explicit YoungGenerationEvacuationVerifier(Heap* heap)
      : EvacuationVerifier(heap) {}

  void Run() override {
    VerifyRoots(VISIT_ALL_IN_SCAVENGE);
    VerifyEvacuation(heap_->new_space());
    VerifyEvacuation(heap_->old_space());
    VerifyEvacuation(heap_->code_space());
    VerifyEvacuation(heap_->map_space());
  }

 protected:
  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
    for (ObjectSlot current = start; current < end; ++current) {
      if ((*current)->IsHeapObject()) {
        HeapObject* object = HeapObject::cast(*current);
        CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
      }
    }
  }

  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
    for (MaybeObjectSlot current = start; current < end; ++current) {
      HeapObject* object;
      if ((*current)->GetHeapObject(&object)) {
        CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
      }
    }
  }
};
#endif  // VERIFY_HEAP

template <class ParallelItem>
void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
                       ItemParallelJob* job) {
  // Create batches of global handles.
  const size_t kGlobalHandlesBufferSize = 1000;
  const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
  for (size_t start = 0; start < new_space_nodes;
       start += kGlobalHandlesBufferSize) {
    size_t end = start + kGlobalHandlesBufferSize;
    if (end > new_space_nodes) end = new_space_nodes;
    job->AddItem(new ParallelItem(heap, global_handles, start, end));
  }
}

bool IsUnmarkedObjectForYoungGeneration(Heap* heap, ObjectSlot p) {
  DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
  return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
                                      ->non_atomic_marking_state()
                                      ->IsGrey(HeapObject::cast(*p));
}
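// Marking for the minor (young-generation) collector: only objects in new
// space are traced, live objects are marked grey (the full collector marks
// black), and each marking task pushes onto its own view of the shared
// marking worklist.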
class YoungGenerationMarkingVisitor final
    : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
 public:
  YoungGenerationMarkingVisitor(
      MinorMarkCompactCollector::MarkingState* marking_state,
      MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
      : worklist_(global_worklist, task_id), marking_state_(marking_state) {}

  V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
                               ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) {
      VisitPointer(host, p);
    }
  }

  V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                               MaybeObjectSlot end) final {
    for (MaybeObjectSlot p = start; p < end; ++p) {
      VisitPointer(host, p);
    }
  }

  V8_INLINE void VisitPointer(HeapObject* host, ObjectSlot slot) final {
    Object* target = *slot;
    DCHECK(!HasWeakHeapObjectTag(target));
    if (Heap::InNewSpace(target)) {
      HeapObject* target_object = HeapObject::cast(target);
      MarkObjectViaMarkingWorklist(target_object);
    }
  }

  V8_INLINE void VisitPointer(HeapObject* host, MaybeObjectSlot slot) final {
    MaybeObject target = *slot;
    if (Heap::InNewSpace(target)) {
      HeapObject* target_object;
      // Treat weak references as strong.
      if (target->GetHeapObject(&target_object)) {
        MarkObjectViaMarkingWorklist(target_object);
      }
    }
  }

 private:
  inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
    if (marking_state_->WhiteToGrey(object)) {
      // Marking deque overflow is unsupported for the young generation.
      CHECK(worklist_.Push(object));
    }
  }

  MinorMarkCompactCollector::MarkingWorklist::View worklist_;
  MinorMarkCompactCollector::MarkingState* marking_state_;
};
void MinorMarkCompactCollector::SetUp() {}

void MinorMarkCompactCollector::TearDown() {}

MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
    : MarkCompactCollectorBase(heap),
      worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
      main_marking_visitor_(new YoungGenerationMarkingVisitor(
          marking_state(), worklist_, kMainMarker)),
      page_parallel_job_semaphore_(0) {
  static_assert(
      kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
      "more marker tasks than marking deque can handle");
}

MinorMarkCompactCollector::~MinorMarkCompactCollector() {
  delete worklist_;
  delete main_marking_visitor_;
}

int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
  DCHECK_GT(pages, 0);
  if (!FLAG_minor_mc_parallel_marking) return 1;
  const int kPagesPerTask = 2;
  const int wanted_tasks = Max(1, pages / kPagesPerTask);
  return Min(NumberOfAvailableCores(),
             Min(wanted_tasks,
                 MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
}

void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
  for (Page* p : sweep_to_iterate_pages_) {
    if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
      p->ClearFlag(Page::SWEEP_TO_ITERATE);
      non_atomic_marking_state()->ClearLiveness(p);
    }
  }
  sweep_to_iterate_pages_.clear();
}
class YoungGenerationMigrationObserver final : public MigrationObserver {
 public:
  YoungGenerationMigrationObserver(
      Heap* heap, MarkCompactCollector* mark_compact_collector)
      : MigrationObserver(heap),
        mark_compact_collector_(mark_compact_collector) {}

  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
                   int size) final {
    // Migrate color to old generation marking in case the object survived
    // young generation garbage collection.
    if (heap_->incremental_marking()->IsMarking()) {
      DCHECK(
          heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
      heap_->incremental_marking()->TransferColor(src, dst);
    }
  }

 protected:
  MarkCompactCollector* mark_compact_collector_;
};

class YoungGenerationRecordMigratedSlotVisitor final
    : public RecordMigratedSlotVisitor {
 public:
  explicit YoungGenerationRecordMigratedSlotVisitor(
      MarkCompactCollector* collector)
      : RecordMigratedSlotVisitor(collector) {}

  void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
    UNREACHABLE();
  }

 private:
  // Only record slots for host objects that are considered live by the full
  // collector.
  inline bool IsLive(HeapObject* object) {
    return collector_->non_atomic_marking_state()->IsBlack(object);
  }

  inline void RecordMigratedSlot(HeapObject* host, MaybeObject value,
                                 Address slot) final {
    if (value->IsStrongOrWeak()) {
      Page* p = Page::FromAddress(value.ptr());
      if (p->InNewSpace()) {
        DCHECK_IMPLIES(p->InToSpace(),
                       p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
        RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      } else if (p->IsEvacuationCandidate() && IsLive(host)) {
        RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
            Page::FromAddress(slot), slot);
      }
    }
  }
};
void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
  TRACE_GC(heap()->tracer(),
           GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);

  PointersUpdatingVisitor updating_visitor(heap());
  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
                               &page_parallel_job_semaphore_);

  CollectNewSpaceArrayBufferTrackerItems(&updating_job);
  // Create batches of global handles.
  SeedGlobalHandles<GlobalHandlesUpdatingItem>(
      heap(), isolate()->global_handles(), &updating_job);
  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
  int remembered_set_pages = 0;
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->old_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->code_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->map_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->lo_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  remembered_set_pages += CollectRememberedSetUpdatingItems(
      &updating_job, heap()->code_lo_space(),
      RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
  const int remembered_set_tasks =
      remembered_set_pages == 0 ? 0
                                : NumberOfParallelPointerUpdateTasks(
                                      remembered_set_pages, old_to_new_slots_);
  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
  for (int i = 0; i < num_tasks; i++) {
    updating_job.AddTask(new PointersUpdatingTask(
        isolate(), GCTracer::BackgroundScope::
                       MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
  }
  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
    updating_job.Run(isolate()->async_counters());
    heap()->array_buffer_collector()->FreeAllocations();
  }

  {
    TRACE_GC(heap()->tracer(),
             GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);

    EvacuationWeakObjectRetainer evacuation_object_retainer;
    heap()->ProcessWeakListRoots(&evacuation_object_retainer);

    // Update pointers from external string table.
    heap()->UpdateNewSpaceReferencesInExternalStringTable(
        &UpdateReferenceInExternalStringTableEntry);
  }
}
class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
 public:
  explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
      : collector_(collector) {}

  void VisitRootPointer(Root root, const char* description,
                        ObjectSlot p) final {
    MarkObjectByPointer(p);
  }

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) final {
    for (ObjectSlot p = start; p < end; ++p) {
      MarkObjectByPointer(p);
    }
  }

 private:
  V8_INLINE void MarkObjectByPointer(ObjectSlot p) {
    if (!(*p)->IsHeapObject()) return;
    collector_->MarkRootObject(HeapObject::cast(*p));
  }
  MinorMarkCompactCollector* const collector_;
};
void MinorMarkCompactCollector::CollectGarbage() {
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
    heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
    CleanupSweepToIteratePages();
  }

  MarkLiveObjects();
  ClearNonLiveReferences();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationMarkingVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  Evacuate();
#ifdef VERIFY_HEAP
  if (FLAG_verify_heap) {
    YoungGenerationEvacuationVerifier verifier(heap());
    verifier.Run();
  }
#endif  // VERIFY_HEAP

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
    heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
    for (Page* p :
         PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
      DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
      non_atomic_marking_state()->ClearLiveness(p);
      if (FLAG_concurrent_marking) {
        heap()->concurrent_marking()->ClearLiveness(p);
      }
    }
  }

  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
      heap(), [](MemoryChunk* chunk) {
        if (chunk->SweepingDone()) {
          RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
        } else {
          RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
        }
      });

  heap()->account_external_memory_concurrently_freed();
}
void MinorMarkCompactCollector::MakeIterable(
    Page* p, MarkingTreatmentMode marking_mode,
    FreeSpaceTreatmentMode free_space_mode) {
  // The full collector's markbits have to be cleared for the areas that are
  // freed here.
  MarkCompactCollector* full_collector = heap()->mark_compact_collector();
  Address free_start = p->area_start();

  for (auto object_and_size :
       LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
    HeapObject* const object = object_and_size.first;
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    Address free_end = object->address();
    if (free_end != free_start) {
      CHECK_GT(free_end, free_start);
      size_t size = static_cast<size_t>(free_end - free_start);
      full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
          p->AddressToMarkbitIndex(free_start),
          p->AddressToMarkbitIndex(free_end));
      if (free_space_mode == ZAP_FREE_SPACE) {
        ZapCode(free_start, size);
      }
      p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                      ClearRecordedSlots::kNo);
    }
    Map map = object->synchronized_map();
    int size = object->SizeFromMap(map);
    free_start = free_end + size;
  }

  if (free_start != p->area_end()) {
    CHECK_GT(p->area_end(), free_start);
    size_t size = static_cast<size_t>(p->area_end() - free_start);
    full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
        p->AddressToMarkbitIndex(free_start),
        p->AddressToMarkbitIndex(p->area_end()));
    if (free_space_mode == ZAP_FREE_SPACE) {
      ZapCode(free_start, size);
    }
    p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
                                    ClearRecordedSlots::kNo);
  }

  if (marking_mode == MarkingTreatmentMode::CLEAR) {
    non_atomic_marking_state()->ClearLiveness(p);
    p->ClearFlag(Page::SWEEP_TO_ITERATE);
  }
}
class YoungGenerationExternalStringTableCleaner : public RootVisitor {
 public:
  explicit YoungGenerationExternalStringTableCleaner(
      MinorMarkCompactCollector* collector)
      : heap_(collector->heap()),
        marking_state_(collector->non_atomic_marking_state()) {}

  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
                         ObjectSlot end) override {
    DCHECK_EQ(static_cast<int>(root),
              static_cast<int>(Root::kExternalStringsTable));
    // Visit all HeapObject pointers in [start, end).
    for (ObjectSlot p = start; p < end; ++p) {
      Object* o = *p;
      if (o->IsHeapObject()) {
        HeapObject* heap_object = HeapObject::cast(o);
        if (marking_state_->IsWhite(heap_object)) {
          if (o->IsExternalString()) {
            heap_->FinalizeExternalString(String::cast(*p));
          } else {
            // The original external string may have been internalized.
            DCHECK(o->IsThinString());
          }
          // Set the entry to the_hole_value (as deleted).
          p.store(ReadOnlyRoots(heap_).the_hole_value());
        }
      }
    }
  }

 private:
  Heap* heap_;
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

// Marked young generation objects and all old generation objects will be
// retained.
class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
 public:
  explicit MinorMarkCompactWeakObjectRetainer(
      MinorMarkCompactCollector* collector)
      : marking_state_(collector->non_atomic_marking_state()) {}

  Object* RetainAs(Object* object) override {
    HeapObject* heap_object = HeapObject::cast(object);
    if (!Heap::InNewSpace(heap_object)) return object;

    // Young generation marking only marks to grey instead of black.
    DCHECK(!marking_state_->IsBlack(heap_object));
    if (marking_state_->IsGrey(heap_object)) {
      return object;
    }
    return nullptr;
  }

 private:
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
};

void MinorMarkCompactCollector::ClearNonLiveReferences() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
    // Internalized strings are always stored in old space, so there is no
    // need to clean them here.
    YoungGenerationExternalStringTableCleaner external_visitor(this);
    heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
    heap()->external_string_table_.CleanUpNewSpaceStrings();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
    // Process the weak references.
    MinorMarkCompactWeakObjectRetainer retainer(this);
    heap()->ProcessYoungWeakReferences(&retainer);
  }
}
void MinorMarkCompactCollector::EvacuatePrologue() {
  NewSpace* new_space = heap()->new_space();
  // Append the list of new space pages to be processed.
  for (Page* p :
       PageRange(new_space->first_allocatable_address(), new_space->top())) {
    new_space_evacuation_pages_.push_back(p);
  }
  new_space->Flip();
  new_space->ResetLinearAllocationArea();
}

void MinorMarkCompactCollector::EvacuateEpilogue() {
  heap()->new_space()->set_age_mark(heap()->new_space()->top());
  // Give pages that are queued to be freed back to the OS.
  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
}

UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
    MemoryChunk* chunk, Address start, Address end) {
  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
      chunk, start, end, non_atomic_marking_state());
}

UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
    MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
      heap(), non_atomic_marking_state(), chunk, updating_mode);
}
class GlobalHandlesMarkingItem;
class PageMarkingItem;
class RootMarkingItem;
class YoungGenerationMarkingTask;

class MarkingItem : public ItemParallelJob::Item {
 public:
  ~MarkingItem() override = default;
  virtual void Process(YoungGenerationMarkingTask* task) = 0;
};

class YoungGenerationMarkingTask : public ItemParallelJob::Task {
 public:
  YoungGenerationMarkingTask(
      Isolate* isolate, MinorMarkCompactCollector* collector,
      MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
      : ItemParallelJob::Task(isolate),
        collector_(collector),
        marking_worklist_(global_worklist, task_id),
        marking_state_(collector->marking_state()),
        visitor_(marking_state_, global_worklist, task_id) {
    local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
                              Page::kPageSize);
  }

  void RunInParallel() override {
    TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
                        GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
    double marking_time = 0.0;
    {
      TimedScope scope(&marking_time);
      MarkingItem* item = nullptr;
      while ((item = GetItem<MarkingItem>()) != nullptr) {
        item->Process(this);
        item->MarkFinished();
        EmptyLocalMarkingWorklist();
      }
      EmptyMarkingWorklist();
      DCHECK(marking_worklist_.IsLocalEmpty());
      FlushLiveBytes();
    }
    if (FLAG_trace_minor_mc_parallel_marking) {
      PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
                   static_cast<void*>(this), marking_time);
    }
  }

  void MarkObject(Object* object) {
    if (!Heap::InNewSpace(object)) return;
    HeapObject* heap_object = HeapObject::cast(object);
    if (marking_state_->WhiteToGrey(heap_object)) {
      const int size = visitor_.Visit(heap_object);
      IncrementLiveBytes(heap_object, size);
    }
  }

 private:
  void EmptyLocalMarkingWorklist() {
    HeapObject* object = nullptr;
    while (marking_worklist_.Pop(&object)) {
      const int size = visitor_.Visit(object);
      IncrementLiveBytes(object, size);
    }
  }

  void EmptyMarkingWorklist() {
    HeapObject* object = nullptr;
    while (marking_worklist_.Pop(&object)) {
      const int size = visitor_.Visit(object);
      IncrementLiveBytes(object, size);
    }
  }

  void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
    local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
        bytes;
  }

  void FlushLiveBytes() {
    for (auto pair : local_live_bytes_) {
      marking_state_->IncrementLiveBytes(pair.first, pair.second);
    }
  }

  MinorMarkCompactCollector* collector_;
  MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
  MinorMarkCompactCollector::MarkingState* marking_state_;
  YoungGenerationMarkingVisitor visitor_;
  std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
};
class PageMarkingItem : public MarkingItem {
 public:
  explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
      : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
  ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }

  void Process(YoungGenerationMarkingTask* task) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "PageMarkingItem::Process");
    base::MutexGuard guard(chunk_->mutex());
    MarkUntypedPointers(task);
    MarkTypedPointers(task);
  }

 private:
  inline Heap* heap() { return chunk_->heap(); }

  void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
    RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
                                       [this, task](MaybeObjectSlot slot) {
                                         return CheckAndMarkObject(task, slot);
                                       },
                                       SlotSet::PREFREE_EMPTY_BUCKETS);
  }

  void MarkTypedPointers(YoungGenerationMarkingTask* task) {
    RememberedSet<OLD_TO_NEW>::IterateTyped(
        chunk_,
        [this, task](SlotType slot_type, Address host_addr, Address slot) {
          return UpdateTypedSlotHelper::UpdateTypedSlot(
              heap(), slot_type, slot, [this, task](MaybeObjectSlot slot) {
                return CheckAndMarkObject(task, slot);
              });
        });
  }

  SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
                                        MaybeObjectSlot slot) {
    MaybeObject object = *slot;
    if (Heap::InNewSpace(object)) {
      // Marking happens before flipping the young generation, so the object
      // has to be in to-space.
      DCHECK(Heap::InToSpace(object));
      HeapObject* heap_object;
      bool success = object->GetHeapObject(&heap_object);
      USE(success);
      DCHECK(success);
      task->MarkObject(heap_object);
      slots_++;
      return KEEP_SLOT;
    }
    return REMOVE_SLOT;
  }

  MemoryChunk* chunk_;
  std::atomic<int>* global_slots_;
  int slots_;
};

class GlobalHandlesMarkingItem : public MarkingItem {
 public:
  GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
                           size_t start, size_t end)
      : global_handles_(global_handles), start_(start), end_(end) {}
  ~GlobalHandlesMarkingItem() override = default;

  void Process(YoungGenerationMarkingTask* task) override {
    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                 "GlobalHandlesMarkingItem::Process");
    GlobalHandlesRootMarkingVisitor visitor(task);
    global_handles_
        ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
            &visitor, start_, end_);
  }

 private:
  class GlobalHandlesRootMarkingVisitor : public RootVisitor {
   public:
    explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
        : task_(task) {}

    void VisitRootPointer(Root root, const char* description,
                          ObjectSlot p) override {
      DCHECK_EQ(Root::kGlobalHandles, root);
      task_->MarkObject(*p);
    }

    void VisitRootPointers(Root root, const char* description,
                           ObjectSlot start, ObjectSlot end) override {
      DCHECK_EQ(Root::kGlobalHandles, root);
      for (ObjectSlot p = start; p < end; ++p) {
        task_->MarkObject(*p);
      }
    }

   private:
    YoungGenerationMarkingTask* task_;
  };

  GlobalHandles* global_handles_;
  size_t start_;
  size_t end_;
};
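// Root marking for the minor collector is parallelized: heap roots, batches
// of global handles, and pages with OLD_TO_NEW remembered-set entries become
// marking items that YoungGenerationMarkingTasks drain. The number of
// old-to-new slots encountered is accumulated for sizing later pointer
// updating tasks.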
void MinorMarkCompactCollector::MarkRootSetInParallel(
    RootMarkingVisitor* root_visitor) {
  std::atomic<int> slots;
  {
    ItemParallelJob job(isolate()->cancelable_task_manager(),
                        &page_parallel_job_semaphore_);

    // Seed the root set (roots + old-to-new set).
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
      heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
      // Create batches of global handles.
      SeedGlobalHandles<GlobalHandlesMarkingItem>(
          heap(), isolate()->global_handles(), &job);
      // Create items for each page.
      RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
          heap(), [&job, &slots](MemoryChunk* chunk) {
            job.AddItem(new PageMarkingItem(chunk, &slots));
          });
    }

    // Add tasks and run in parallel.
    {
      TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
      const int new_space_pages =
          static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
      const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
      for (int i = 0; i < num_tasks; i++) {
        job.AddTask(
            new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
      }
      job.Run(isolate()->async_counters());
      DCHECK(worklist()->IsEmpty());
    }
  }
  old_to_new_slots_ = slots;
}
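
// Entry point for young-generation marking: marks the root set in parallel,
// drains the remaining worklist on the main thread, and then processes weak
// and phantom new-space global handles.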
void MinorMarkCompactCollector::MarkLiveObjects() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);

  PostponeInterruptsScope postpone(isolate());

  RootMarkingVisitor root_visitor(this);
  MarkRootSetInParallel(&root_visitor);

  // Mark the rest on the main thread.
  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
    ProcessMarkingWorklist();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
    isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
        &IsUnmarkedObjectForYoungGeneration);
    isolate()
        ->global_handles()
        ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
    isolate()
        ->global_handles()
        ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
            &root_visitor, &IsUnmarkedObjectForYoungGeneration);
    ProcessMarkingWorklist();
  }
}
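
// Drains the main-thread view of the marking worklist, visiting each popped
// grey object with the main marking visitor.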
void MinorMarkCompactCollector::ProcessMarkingWorklist() {
  MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
  HeapObject* object = nullptr;
  while (marking_worklist.Pop(&object)) {
    DCHECK(!object->IsFiller());
    DCHECK(object->IsHeapObject());
    DCHECK(heap()->Contains(object));
    DCHECK(non_atomic_marking_state()->IsGrey(object));
    main_marking_visitor()->Visit(object);
  }
  DCHECK(marking_worklist.IsLocalEmpty());
}
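
// Evacuation phase of the minor collector: copies or promotes live new-space
// objects in parallel, updates pointers, rebalances new space, and flags
// promoted pages so they can be swept and iterated later.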
void MinorMarkCompactCollector::Evacuate() {
  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
  base::MutexGuard guard(heap()->relocation_mutex());

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
    EvacuatePrologue();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
    EvacuatePagesInParallel();
  }

  UpdatePointersAfterEvacuation();

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
    if (!heap()->new_space()->Rebalance()) {
      heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
    }
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
    for (Page* p : new_space_evacuation_pages_) {
      if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
          p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
        p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
        p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
        p->SetFlag(Page::SWEEP_TO_ITERATE);
        sweep_to_iterate_pages_.push_back(p);
      }
    }
    new_space_evacuation_pages_.clear();
  }

  {
    TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
    EvacuateEpilogue();
  }
}
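
// Evacuator specialization for the young generation. Depending on a chunk's
// evacuation mode, live objects are either copied out of new space or the
// whole page is promoted in place (new-to-old / new-to-new).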
class YoungGenerationEvacuator : public Evacuator {
 public:
  YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
                           RecordMigratedSlotVisitor* record_visitor)
      : Evacuator(collector->heap(), record_visitor), collector_(collector) {}

  GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
    return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
  }

 protected:
  void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;

  MinorMarkCompactCollector* collector_;
};
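
// Evacuates a single chunk. For the copy mode the mark bits are cleared as
// objects are moved; for page promotion the live bytes are merely accounted
// for, dead array buffers are freed, and the page is made iterable again.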
void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
                                               intptr_t* live_bytes) {
  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
               "YoungGenerationEvacuator::RawEvacuatePage");
  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
      collector_->non_atomic_marking_state();
  *live_bytes = marking_state->live_bytes(chunk);
  switch (ComputeEvacuationMode(chunk)) {
    case kObjectsNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_space_visitor_,
          LiveObjectVisitor::kClearMarkbits);
      break;
    case kPageNewToOld:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_to_old_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_old_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      if (chunk->owner()->identity() != NEW_LO_SPACE) {
        ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
        if (heap()->ShouldZapGarbage()) {
          collector_->MakeIterable(static_cast<Page*>(chunk),
                                   MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
        } else if (heap()->incremental_marking()->IsMarking()) {
          // When incremental marking is on, the full collector's mark bits
          // must be cleared, while the young-generation bits are still needed
          // for updating pointers.
          collector_->MakeIterable(static_cast<Page*>(chunk),
                                   MarkingTreatmentMode::KEEP,
                                   IGNORE_FREE_SPACE);
        }
      }
      break;
    case kPageNewToNew:
      LiveObjectVisitor::VisitGreyObjectsNoFail(
          chunk, marking_state, &new_to_new_page_visitor_,
          LiveObjectVisitor::kKeepMarking);
      new_to_new_page_visitor_.account_moved_bytes(
          marking_state->live_bytes(chunk));
      DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
      ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
      if (heap()->ShouldZapGarbage()) {
        collector_->MakeIterable(static_cast<Page*>(chunk),
                                 MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
      } else if (heap()->incremental_marking()->IsMarking()) {
        // See the comment in the kPageNewToOld case above.
        collector_->MakeIterable(static_cast<Page*>(chunk),
                                 MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
      }
      break;
    case kObjectsOldToOld:
      UNREACHABLE();
      break;
  }
}
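
// Collects the new-space pages to evacuate, decides per page whether to move
// it wholesale (below the age mark: new-to-old, otherwise new-to-new) or to
// copy its objects, and then runs the evacuation items in parallel.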
void MinorMarkCompactCollector::EvacuatePagesInParallel() {
  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
                                 &page_parallel_job_semaphore_);
  intptr_t live_bytes = 0;

  for (Page* page : new_space_evacuation_pages_) {
    intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
    if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
    live_bytes += live_bytes_on_page;
    if (ShouldMovePage(page, live_bytes_on_page)) {
      if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
        EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
      } else {
        EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
      }
    }
    evacuation_job.AddItem(new EvacuationItem(page));
  }
  if (evacuation_job.NumberOfItems() == 0) return;

  YoungGenerationMigrationObserver observer(heap(),
                                            heap()->mark_compact_collector());
  YoungGenerationRecordMigratedSlotVisitor record_visitor(
      heap()->mark_compact_collector());
  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
      this, &evacuation_job, &record_visitor, &observer, live_bytes);
}
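
// Adds an array-buffer-tracker updating item for every evacuated new-space
// page that still has a local tracker and returns the number of pages for
// which an item was queued.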
int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
    ItemParallelJob* job) {
  int pages = 0;
  for (Page* p : new_space_evacuation_pages_) {
    if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
      if (p->local_tracker() == nullptr) continue;

      pages++;
      job->AddItem(new ArrayBufferTrackerUpdatingItem(
          p, ArrayBufferTrackerUpdatingItem::kRegular));
    }
  }
  return pages;
}

#endif  // ENABLE_MINOR_MC