V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
mark-compact.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/mark-compact.h"
6 
7 #include <unordered_map>
8 
9 #include "src/base/utils/random-number-generator.h"
10 #include "src/cancelable-task.h"
11 #include "src/code-stubs.h"
12 #include "src/compilation-cache.h"
13 #include "src/deoptimizer.h"
14 #include "src/execution.h"
15 #include "src/frames-inl.h"
16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-collector.h"
18 #include "src/heap/array-buffer-tracker-inl.h"
19 #include "src/heap/gc-tracer.h"
20 #include "src/heap/incremental-marking-inl.h"
21 #include "src/heap/invalidated-slots-inl.h"
22 #include "src/heap/item-parallel-job.h"
23 #include "src/heap/local-allocator-inl.h"
24 #include "src/heap/mark-compact-inl.h"
25 #include "src/heap/object-stats.h"
26 #include "src/heap/objects-visiting-inl.h"
27 #include "src/heap/spaces-inl.h"
28 #include "src/heap/sweeper.h"
29 #include "src/heap/worklist.h"
30 #include "src/ic/stub-cache.h"
31 #include "src/objects/hash-table-inl.h"
32 #include "src/objects/js-objects-inl.h"
33 #include "src/objects/maybe-object.h"
34 #include "src/objects/slots-inl.h"
35 #include "src/transitions-inl.h"
36 #include "src/utils-inl.h"
37 #include "src/v8.h"
38 #include "src/vm-state-inl.h"
39 
40 namespace v8 {
41 namespace internal {
42 
43 const char* Marking::kWhiteBitPattern = "00";
44 const char* Marking::kBlackBitPattern = "11";
45 const char* Marking::kGreyBitPattern = "10";
46 const char* Marking::kImpossibleBitPattern = "01";
47 
48 // The following has to hold in order for {MarkingState::MarkBitFrom} to not
49 // produce invalid {kImpossibleBitPattern} in the marking bitmap by overlapping.
50 STATIC_ASSERT(Heap::kMinObjectSizeInTaggedWords >= 2);
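// A minimal standalone sketch of how a pair of mark bits decodes into the
// colors named by the patterns above; the enum and function are illustrative
// assumptions, not part of this file. The first bit is the one addressed by
// the object's start, the second bit is the one following it.
enum class ExampleColor { kWhite, kGrey, kBlack, kImpossible };
static inline ExampleColor ExampleColorFromBits(bool first_bit,
                                                bool second_bit) {
  if (first_bit && second_bit) return ExampleColor::kBlack;  // "11"
  if (first_bit) return ExampleColor::kGrey;                 // "10"
  if (second_bit) return ExampleColor::kImpossible;          // "01"
  return ExampleColor::kWhite;                               // "00"
}
// Because every object spans at least two tagged words (the STATIC_ASSERT
// above), the two bits of one object never overlap the first bit of the next
// object, which is what keeps the "01" pattern out of a well-formed bitmap.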
51 
52 // =============================================================================
53 // Verifiers
54 // =============================================================================
55 
56 #ifdef VERIFY_HEAP
57 namespace {
58 
59 class MarkingVerifier : public ObjectVisitor, public RootVisitor {
60  public:
61  virtual void Run() = 0;
62 
63  protected:
64  explicit MarkingVerifier(Heap* heap) : heap_(heap) {}
65 
66  virtual Bitmap* bitmap(const MemoryChunk* chunk) = 0;
67 
68  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
69  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
70 
71  virtual bool IsMarked(HeapObject* object) = 0;
72 
73  virtual bool IsBlackOrGrey(HeapObject* object) = 0;
74 
75  void VisitPointers(HeapObject* host, ObjectSlot start,
76  ObjectSlot end) override {
77  VerifyPointers(start, end);
78  }
79 
80  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
81  MaybeObjectSlot end) override {
82  VerifyPointers(start, end);
83  }
84 
85  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
86  ObjectSlot end) override {
87  VerifyPointers(start, end);
88  }
89 
90  void VerifyRoots(VisitMode mode);
91  void VerifyMarkingOnPage(const Page* page, Address start, Address end);
92  void VerifyMarking(NewSpace* new_space);
93  void VerifyMarking(PagedSpace* paged_space);
94  void VerifyMarking(LargeObjectSpace* lo_space);
95 
96  Heap* heap_;
97 };
98 
99 void MarkingVerifier::VerifyRoots(VisitMode mode) {
100  heap_->IterateStrongRoots(this, mode);
101 }
102 
103 void MarkingVerifier::VerifyMarkingOnPage(const Page* page, Address start,
104  Address end) {
105  HeapObject* object;
106  Address next_object_must_be_here_or_later = start;
107  for (Address current = start; current < end;) {
108  object = HeapObject::FromAddress(current);
109  // One word fillers at the end of a black area can be grey.
110  if (IsBlackOrGrey(object) &&
111  object->map() != ReadOnlyRoots(heap_).one_pointer_filler_map()) {
112  CHECK(IsMarked(object));
113  CHECK(current >= next_object_must_be_here_or_later);
114  object->Iterate(this);
115  next_object_must_be_here_or_later = current + object->Size();
116  // The object is either part of a black area (from black allocation) or a
117  // regular black object.
118  CHECK(
119  bitmap(page)->AllBitsSetInRange(
120  page->AddressToMarkbitIndex(current),
121  page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
122  bitmap(page)->AllBitsClearInRange(
123  page->AddressToMarkbitIndex(current + kPointerSize * 2),
124  page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
125  current = next_object_must_be_here_or_later;
126  } else {
127  current += kPointerSize;
128  }
129  }
130 }
131 
132 void MarkingVerifier::VerifyMarking(NewSpace* space) {
133  Address end = space->top();
134  // The bottom position is at the start of its page. Allows us to use
135  // page->area_start() as start of range on all pages.
136  CHECK_EQ(space->first_allocatable_address(),
137  space->first_page()->area_start());
138 
139  PageRange range(space->first_allocatable_address(), end);
140  for (auto it = range.begin(); it != range.end();) {
141  Page* page = *(it++);
142  Address limit = it != range.end() ? page->area_end() : end;
143  CHECK(limit == end || !page->Contains(end));
144  VerifyMarkingOnPage(page, page->area_start(), limit);
145  }
146 }
147 
148 void MarkingVerifier::VerifyMarking(PagedSpace* space) {
149  for (Page* p : *space) {
150  VerifyMarkingOnPage(p, p->area_start(), p->area_end());
151  }
152 }
153 
154 void MarkingVerifier::VerifyMarking(LargeObjectSpace* lo_space) {
155  LargeObjectIterator it(lo_space);
156  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
157  if (IsBlackOrGrey(obj)) {
158  obj->Iterate(this);
159  }
160  }
161 }
162 
163 class FullMarkingVerifier : public MarkingVerifier {
164  public:
165  explicit FullMarkingVerifier(Heap* heap)
166  : MarkingVerifier(heap),
167  marking_state_(
168  heap->mark_compact_collector()->non_atomic_marking_state()) {}
169 
170  void Run() override {
171  VerifyRoots(VISIT_ONLY_STRONG);
172  VerifyMarking(heap_->new_space());
173  VerifyMarking(heap_->old_space());
174  VerifyMarking(heap_->code_space());
175  VerifyMarking(heap_->map_space());
176  VerifyMarking(heap_->lo_space());
177  VerifyMarking(heap_->code_lo_space());
178  }
179 
180  protected:
181  Bitmap* bitmap(const MemoryChunk* chunk) override {
182  return marking_state_->bitmap(chunk);
183  }
184 
185  bool IsMarked(HeapObject* object) override {
186  return marking_state_->IsBlack(object);
187  }
188 
189  bool IsBlackOrGrey(HeapObject* object) override {
190  return marking_state_->IsBlackOrGrey(object);
191  }
192 
193  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
194  for (ObjectSlot current = start; current < end; ++current) {
195  if ((*current)->IsHeapObject()) {
196  HeapObject* object = HeapObject::cast(*current);
197  CHECK(marking_state_->IsBlackOrGrey(object));
198  }
199  }
200  }
201 
202  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
203  for (MaybeObjectSlot current = start; current < end; ++current) {
204  HeapObject* object;
205  if ((*current)->GetHeapObjectIfStrong(&object)) {
206  CHECK(marking_state_->IsBlackOrGrey(object));
207  }
208  }
209  }
210 
211  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
212  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
213  if (!host->IsWeakObject(rinfo->target_object())) {
214  Object* p = rinfo->target_object();
215  VisitPointer(host, ObjectSlot(&p));
216  }
217  }
218 
219  private:
220  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
221 };
222 
223 class EvacuationVerifier : public ObjectVisitor, public RootVisitor {
224  public:
225  virtual void Run() = 0;
226 
227  void VisitPointers(HeapObject* host, ObjectSlot start,
228  ObjectSlot end) override {
229  VerifyPointers(start, end);
230  }
231 
232  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
233  MaybeObjectSlot end) override {
234  VerifyPointers(start, end);
235  }
236 
237  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
238  ObjectSlot end) override {
239  VerifyPointers(start, end);
240  }
241 
242  protected:
243  explicit EvacuationVerifier(Heap* heap) : heap_(heap) {}
244 
245  inline Heap* heap() { return heap_; }
246 
247  virtual void VerifyPointers(ObjectSlot start, ObjectSlot end) = 0;
248  virtual void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) = 0;
249 
250  void VerifyRoots(VisitMode mode);
251  void VerifyEvacuationOnPage(Address start, Address end);
252  void VerifyEvacuation(NewSpace* new_space);
253  void VerifyEvacuation(PagedSpace* paged_space);
254 
255  Heap* heap_;
256 };
257 
258 void EvacuationVerifier::VerifyRoots(VisitMode mode) {
259  heap_->IterateStrongRoots(this, mode);
260 }
261 
262 void EvacuationVerifier::VerifyEvacuationOnPage(Address start, Address end) {
263  Address current = start;
264  while (current < end) {
265  HeapObject* object = HeapObject::FromAddress(current);
266  if (!object->IsFiller()) object->Iterate(this);
267  current += object->Size();
268  }
269 }
270 
271 void EvacuationVerifier::VerifyEvacuation(NewSpace* space) {
272  PageRange range(space->first_allocatable_address(), space->top());
273  for (auto it = range.begin(); it != range.end();) {
274  Page* page = *(it++);
275  Address current = page->area_start();
276  Address limit = it != range.end() ? page->area_end() : space->top();
277  CHECK(limit == space->top() || !page->Contains(space->top()));
278  VerifyEvacuationOnPage(current, limit);
279  }
280 }
281 
282 void EvacuationVerifier::VerifyEvacuation(PagedSpace* space) {
283  for (Page* p : *space) {
284  if (p->IsEvacuationCandidate()) continue;
285  if (p->Contains(space->top())) {
286  CodePageMemoryModificationScope memory_modification_scope(p);
287  heap_->CreateFillerObjectAt(
288  space->top(), static_cast<int>(space->limit() - space->top()),
289  ClearRecordedSlots::kNo);
290  }
291  VerifyEvacuationOnPage(p->area_start(), p->area_end());
292  }
293 }
294 
295 class FullEvacuationVerifier : public EvacuationVerifier {
296  public:
297  explicit FullEvacuationVerifier(Heap* heap) : EvacuationVerifier(heap) {}
298 
299  void Run() override {
300  VerifyRoots(VISIT_ALL);
301  VerifyEvacuation(heap_->new_space());
302  VerifyEvacuation(heap_->old_space());
303  VerifyEvacuation(heap_->code_space());
304  VerifyEvacuation(heap_->map_space());
305  }
306 
307  protected:
308  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
309  for (ObjectSlot current = start; current < end; ++current) {
310  if ((*current)->IsHeapObject()) {
311  HeapObject* object = HeapObject::cast(*current);
312  if (Heap::InNewSpace(object)) {
313  CHECK(Heap::InToSpace(object));
314  }
315  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
316  }
317  }
318  }
319  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
320  for (MaybeObjectSlot current = start; current < end; ++current) {
321  HeapObject* object;
322  if ((*current)->GetHeapObjectIfStrong(&object)) {
323  if (Heap::InNewSpace(object)) {
324  CHECK(Heap::InToSpace(object));
325  }
326  CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
327  }
328  }
329  }
330 };
331 
332 } // namespace
333 #endif // VERIFY_HEAP
334 
335 // =============================================================================
336 // MarkCompactCollectorBase, MinorMarkCompactCollector, MarkCompactCollector
337 // =============================================================================
338 
339 using MarkCompactMarkingVisitor =
340  MarkingVisitor<FixedArrayVisitationMode::kRegular,
341  TraceRetainingPathMode::kEnabled,
342  MarkCompactCollector::MarkingState>;
343 
344 namespace {
345 
346 int NumberOfAvailableCores() {
347  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
348  // This number of cores should be greater than zero and never change.
349  DCHECK_GE(num_cores, 1);
350  DCHECK_EQ(num_cores, V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1);
351  return num_cores;
352 }
353 
354 } // namespace
355 
356 int MarkCompactCollectorBase::NumberOfParallelCompactionTasks(int pages) {
357  DCHECK_GT(pages, 0);
358  int tasks =
359  FLAG_parallel_compaction ? Min(NumberOfAvailableCores(), pages) : 1;
360  if (!heap_->CanExpandOldGeneration(
361  static_cast<size_t>(tasks * Page::kPageSize))) {
362  // Optimize for memory usage near the heap limit.
363  tasks = 1;
364  }
365  return tasks;
366 }
367 
368 int MarkCompactCollectorBase::NumberOfParallelPointerUpdateTasks(int pages,
369  int slots) {
370  DCHECK_GT(pages, 0);
371  // Limit the number of update tasks as task creation often dominates the
372  // actual work that is being done.
373  const int kMaxPointerUpdateTasks = 8;
374  const int kSlotsPerTask = 600;
375  const int wanted_tasks =
376  (slots >= 0) ? Max(1, Min(pages, slots / kSlotsPerTask)) : pages;
377  return FLAG_parallel_pointer_update
378  ? Min(kMaxPointerUpdateTasks,
379  Min(NumberOfAvailableCores(), wanted_tasks))
380  : 1;
381 }
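// A minimal standalone sketch of the sizing rule above, using plain ints and
// an illustrative function name; it mirrors the Min/Max arithmetic without
// depending on V8 internals.
static inline int ExamplePointerUpdateTasks(int cores, int pages, int slots) {
  const int kMaxPointerUpdateTasks = 8;
  const int kSlotsPerTask = 600;
  int wanted = (slots >= 0) ? slots / kSlotsPerTask : pages;
  if (wanted < 1) wanted = 1;          // at least one task
  if (wanted > pages) wanted = pages;  // never more tasks than pages
  int tasks = wanted < cores ? wanted : cores;
  return tasks < kMaxPointerUpdateTasks ? tasks : kMaxPointerUpdateTasks;
}
// E.g. 8 cores, 40 pages and 1800 slots give 1800 / 600 = 3 tasks, while an
// unknown slot count (-1) on the same heap saturates at the cap of 8.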
382 
383 int MarkCompactCollectorBase::NumberOfParallelToSpacePointerUpdateTasks(
384  int pages) {
385  DCHECK_GT(pages, 0);
386  // No cap needed because all pages we need to process are fully filled with
387  // interesting objects.
388  return FLAG_parallel_pointer_update ? Min(NumberOfAvailableCores(), pages)
389  : 1;
390 }
391 
392 MarkCompactCollector::MarkCompactCollector(Heap* heap)
393  : MarkCompactCollectorBase(heap),
394  page_parallel_job_semaphore_(0),
395 #ifdef DEBUG
396  state_(IDLE),
397 #endif
398  was_marked_incrementally_(false),
399  evacuation_(false),
400  compacting_(false),
401  black_allocation_(false),
402  have_code_to_deoptimize_(false),
403  marking_worklist_(heap),
404  sweeper_(new Sweeper(heap, non_atomic_marking_state())) {
405  old_to_new_slots_ = -1;
406 }
407 
408 MarkCompactCollector::~MarkCompactCollector() { delete sweeper_; }
409 
410 void MarkCompactCollector::SetUp() {
411  DCHECK_EQ(0, strcmp(Marking::kWhiteBitPattern, "00"));
412  DCHECK_EQ(0, strcmp(Marking::kBlackBitPattern, "11"));
413  DCHECK_EQ(0, strcmp(Marking::kGreyBitPattern, "10"));
414  DCHECK_EQ(0, strcmp(Marking::kImpossibleBitPattern, "01"));
415 }
416 
417 void MarkCompactCollector::TearDown() {
418  AbortCompaction();
419  AbortWeakObjects();
420  if (heap()->incremental_marking()->IsMarking()) {
421  marking_worklist()->Clear();
422  }
423 }
424 
425 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
426  DCHECK(!p->NeverEvacuate());
427  p->MarkEvacuationCandidate();
428  evacuation_candidates_.push_back(p);
429 }
430 
431 
432 static void TraceFragmentation(PagedSpace* space) {
433  int number_of_pages = space->CountTotalPages();
434  intptr_t reserved = (number_of_pages * space->AreaSize());
435  intptr_t free = reserved - space->SizeOfObjects();
436  PrintF("[%s]: %d pages, %d (%.1f%%) free\n", space->name(), number_of_pages,
437  static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
438 }
439 
440 bool MarkCompactCollector::StartCompaction() {
441  if (!compacting_) {
442  DCHECK(evacuation_candidates_.empty());
443 
444  CollectEvacuationCandidates(heap()->old_space());
445 
446  if (FLAG_compact_code_space) {
447  CollectEvacuationCandidates(heap()->code_space());
448  } else if (FLAG_trace_fragmentation) {
449  TraceFragmentation(heap()->code_space());
450  }
451 
452  if (FLAG_trace_fragmentation) {
453  TraceFragmentation(heap()->map_space());
454  }
455 
456  compacting_ = !evacuation_candidates_.empty();
457  }
458 
459  return compacting_;
460 }
461 
462 void MarkCompactCollector::CollectGarbage() {
463  // Make sure that Prepare() has been called. The individual steps below will
464  // update the state as they proceed.
465  DCHECK(state_ == PREPARE_GC);
466 
467 #ifdef ENABLE_MINOR_MC
468  heap()->minor_mark_compact_collector()->CleanupSweepToIteratePages();
469 #endif // ENABLE_MINOR_MC
470 
471  MarkLiveObjects();
472  ClearNonLiveReferences();
473  VerifyMarking();
474 
475  RecordObjectStats();
476 
477  StartSweepSpaces();
478 
479  Evacuate();
480 
481  Finish();
482 }
483 
484 #ifdef VERIFY_HEAP
485 void MarkCompactCollector::VerifyMarkbitsAreDirty(PagedSpace* space) {
486  HeapObjectIterator iterator(space);
487  while (HeapObject* object = iterator.Next()) {
488  CHECK(non_atomic_marking_state()->IsBlack(object));
489  }
490 }
491 
492 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
493  for (Page* p : *space) {
494  CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
495  CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
496  }
497 }
498 
499 
500 void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
501  for (Page* p : PageRange(space->first_allocatable_address(), space->top())) {
502  CHECK(non_atomic_marking_state()->bitmap(p)->IsClean());
503  CHECK_EQ(0, non_atomic_marking_state()->live_bytes(p));
504  }
505 }
506 
507 void MarkCompactCollector::VerifyMarkbitsAreClean(LargeObjectSpace* space) {
508  LargeObjectIterator it(space);
509  for (HeapObject* obj = it.Next(); obj != nullptr; obj = it.Next()) {
510  CHECK(non_atomic_marking_state()->IsWhite(obj));
511  CHECK_EQ(0, non_atomic_marking_state()->live_bytes(
512  MemoryChunk::FromAddress(obj->address())));
513  }
514 }
515 
516 void MarkCompactCollector::VerifyMarkbitsAreClean() {
517  VerifyMarkbitsAreClean(heap_->old_space());
518  VerifyMarkbitsAreClean(heap_->code_space());
519  VerifyMarkbitsAreClean(heap_->map_space());
520  VerifyMarkbitsAreClean(heap_->new_space());
521  // Read-only space should always be black since we never collect any objects
522  // in it or linked from it.
523  VerifyMarkbitsAreDirty(heap_->read_only_space());
524  VerifyMarkbitsAreClean(heap_->lo_space());
525  VerifyMarkbitsAreClean(heap_->code_lo_space());
526  VerifyMarkbitsAreClean(heap_->new_lo_space());
527 }
528 
529 #endif // VERIFY_HEAP
530 
531 void MarkCompactCollector::EnsureSweepingCompleted() {
532  if (!sweeper()->sweeping_in_progress()) return;
533 
534  sweeper()->EnsureCompleted();
535  heap()->old_space()->RefillFreeList();
536  heap()->code_space()->RefillFreeList();
537  heap()->map_space()->RefillFreeList();
538 
539 #ifdef VERIFY_HEAP
540  if (FLAG_verify_heap && !evacuation()) {
541  FullEvacuationVerifier verifier(heap());
542  verifier.Run();
543  }
544 #endif
545 }
546 
547 void MarkCompactCollector::ComputeEvacuationHeuristics(
548  size_t area_size, int* target_fragmentation_percent,
549  size_t* max_evacuated_bytes) {
550  // For the memory-reducing and optimize-for-memory modes we directly define
551  // both constants.
552  const int kTargetFragmentationPercentForReduceMemory = 20;
553  const size_t kMaxEvacuatedBytesForReduceMemory = 12 * MB;
554  const int kTargetFragmentationPercentForOptimizeMemory = 20;
555  const size_t kMaxEvacuatedBytesForOptimizeMemory = 6 * MB;
556 
557  // For regular mode (which is latency critical) we define less aggressive
558  // defaults to start and switch to a trace-based (using compaction speed)
559  // approach as soon as we have enough samples.
560  const int kTargetFragmentationPercent = 70;
561  const size_t kMaxEvacuatedBytes = 4 * MB;
562  // Time to take for a single area (=payload of page). Used as soon as there
563  // exist enough compaction speed samples.
564  const float kTargetMsPerArea = .5;
565 
566  if (heap()->ShouldReduceMemory()) {
567  *target_fragmentation_percent = kTargetFragmentationPercentForReduceMemory;
568  *max_evacuated_bytes = kMaxEvacuatedBytesForReduceMemory;
569  } else if (heap()->ShouldOptimizeForMemoryUsage()) {
570  *target_fragmentation_percent =
571  kTargetFragmentationPercentForOptimizeMemory;
572  *max_evacuated_bytes = kMaxEvacuatedBytesForOptimizeMemory;
573  } else {
574  const double estimated_compaction_speed =
575  heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
576  if (estimated_compaction_speed != 0) {
577  // Estimate the target fragmentation based on traced compaction speed
578  // and a goal for a single page.
579  const double estimated_ms_per_area =
580  1 + area_size / estimated_compaction_speed;
581  *target_fragmentation_percent = static_cast<int>(
582  100 - 100 * kTargetMsPerArea / estimated_ms_per_area);
583  if (*target_fragmentation_percent <
584  kTargetFragmentationPercentForReduceMemory) {
585  *target_fragmentation_percent =
586  kTargetFragmentationPercentForReduceMemory;
587  }
588  } else {
589  *target_fragmentation_percent = kTargetFragmentationPercent;
590  }
591  *max_evacuated_bytes = kMaxEvacuatedBytes;
592  }
593 }
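// A minimal standalone sketch of the trace-based branch above, with
// illustrative numbers; only the arithmetic is reproduced here.
static inline int ExampleTargetFragmentationPercent(
    double area_size_bytes, double compaction_speed_bytes_per_ms) {
  const double kTargetMsPerArea = .5;
  const double estimated_ms_per_area =
      1 + area_size_bytes / compaction_speed_bytes_per_ms;
  return static_cast<int>(100 -
                          100 * kTargetMsPerArea / estimated_ms_per_area);
}
// E.g. a ~500 KB page area and a traced speed of 1 MB/ms give roughly
// 1 + 0.49 = 1.49 ms per area, so the threshold becomes
// 100 - 100 * 0.5 / 1.49 ~= 66, i.e. a page must be about two thirds free to
// qualify (and the result is never allowed below the 20% reduce-memory floor).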
594 
595 void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
596  DCHECK(space->identity() == OLD_SPACE || space->identity() == CODE_SPACE);
597 
598  int number_of_pages = space->CountTotalPages();
599  size_t area_size = space->AreaSize();
600 
601  // Pairs of (live_bytes_in_page, page).
602  typedef std::pair<size_t, Page*> LiveBytesPagePair;
603  std::vector<LiveBytesPagePair> pages;
604  pages.reserve(number_of_pages);
605 
606  DCHECK(!sweeping_in_progress());
607  Page* owner_of_linear_allocation_area =
608  space->top() == space->limit()
609  ? nullptr
610  : Page::FromAllocationAreaAddress(space->top());
611  for (Page* p : *space) {
612  if (p->NeverEvacuate() || (p == owner_of_linear_allocation_area) ||
613  !p->CanAllocate())
614  continue;
615  // Invariant: Evacuation candidates are just created when marking is
616  // started. This means that sweeping has finished. Furthermore, at the end
617  // of a GC all evacuation candidates are cleared and their slot buffers are
618  // released.
619  CHECK(!p->IsEvacuationCandidate());
620  CHECK_NULL(p->slot_set<OLD_TO_OLD>());
621  CHECK_NULL(p->typed_slot_set<OLD_TO_OLD>());
622  CHECK(p->SweepingDone());
623  DCHECK(p->area_size() == area_size);
624  pages.push_back(std::make_pair(p->allocated_bytes(), p));
625  }
626 
627  int candidate_count = 0;
628  size_t total_live_bytes = 0;
629 
630  const bool reduce_memory = heap()->ShouldReduceMemory();
631  if (FLAG_manual_evacuation_candidates_selection) {
632  for (size_t i = 0; i < pages.size(); i++) {
633  Page* p = pages[i].second;
634  if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
635  candidate_count++;
636  total_live_bytes += pages[i].first;
637  p->ClearFlag(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING);
638  AddEvacuationCandidate(p);
639  }
640  }
641  } else if (FLAG_stress_compaction_random) {
642  double fraction = isolate()->fuzzer_rng()->NextDouble();
643  size_t pages_to_mark_count =
644  static_cast<size_t>(fraction * (pages.size() + 1));
645  for (uint64_t i : isolate()->fuzzer_rng()->NextSample(
646  pages.size(), pages_to_mark_count)) {
647  candidate_count++;
648  total_live_bytes += pages[i].first;
649  AddEvacuationCandidate(pages[i].second);
650  }
651  } else if (FLAG_stress_compaction) {
652  for (size_t i = 0; i < pages.size(); i++) {
653  Page* p = pages[i].second;
654  if (i % 2 == 0) {
655  candidate_count++;
656  total_live_bytes += pages[i].first;
657  AddEvacuationCandidate(p);
658  }
659  }
660  } else {
661  // The following approach determines the pages that should be evacuated.
662  //
663  // We use two conditions to decide whether a page qualifies as an evacuation
664  // candidate, or not:
665  // * Target fragmentation: How fragmented is a page, i.e., what is the ratio
666  // between live bytes and capacity of this page (= area).
667  // * Evacuation quota: A global quota determining how many bytes should be
668  // compacted.
669  //
670  // The algorithm sorts all pages by live bytes and then iterates through
671  // them starting with the page with the most free memory, adding them to the
672  // set of evacuation candidates as long as both conditions (fragmentation
673  // and quota) hold.
674  size_t max_evacuated_bytes;
675  int target_fragmentation_percent;
676  ComputeEvacuationHeuristics(area_size, &target_fragmentation_percent,
677  &max_evacuated_bytes);
678 
679  const size_t free_bytes_threshold =
680  target_fragmentation_percent * (area_size / 100);
681 
682  // Sort pages from the most free to the least free, then select
683  // the first n pages for evacuation such that:
684  // - the total size of evacuated objects does not exceed the specified
685  // limit.
686  // - fragmentation of the (n+1)-th page does not exceed the specified limit.
687  std::sort(pages.begin(), pages.end(),
688  [](const LiveBytesPagePair& a, const LiveBytesPagePair& b) {
689  return a.first < b.first;
690  });
691  for (size_t i = 0; i < pages.size(); i++) {
692  size_t live_bytes = pages[i].first;
693  DCHECK_GE(area_size, live_bytes);
694  size_t free_bytes = area_size - live_bytes;
695  if (FLAG_always_compact ||
696  ((free_bytes >= free_bytes_threshold) &&
697  ((total_live_bytes + live_bytes) <= max_evacuated_bytes))) {
698  candidate_count++;
699  total_live_bytes += live_bytes;
700  }
701  if (FLAG_trace_fragmentation_verbose) {
702  PrintIsolate(isolate(),
703  "compaction-selection-page: space=%s free_bytes_page=%zu "
704  "fragmentation_limit_kb=%" PRIuS
705  " fragmentation_limit_percent=%d sum_compaction_kb=%zu "
706  "compaction_limit_kb=%zu\n",
707  space->name(), free_bytes / KB, free_bytes_threshold / KB,
708  target_fragmentation_percent, total_live_bytes / KB,
709  max_evacuated_bytes / KB);
710  }
711  }
712  // How many pages we will allocate for the evacuated objects
713  // in the worst case: ceil(total_live_bytes / area_size)
714  int estimated_new_pages =
715  static_cast<int>((total_live_bytes + area_size - 1) / area_size);
716  DCHECK_LE(estimated_new_pages, candidate_count);
717  int estimated_released_pages = candidate_count - estimated_new_pages;
718  // Avoid (compact -> expand) cycles.
719  if ((estimated_released_pages == 0) && !FLAG_always_compact) {
720  candidate_count = 0;
721  }
722  for (int i = 0; i < candidate_count; i++) {
723  AddEvacuationCandidate(pages[i].second);
724  }
725  }
726 
727  if (FLAG_trace_fragmentation) {
728  PrintIsolate(isolate(),
729  "compaction-selection: space=%s reduce_memory=%d pages=%d "
730  "total_live_bytes=%zu\n",
731  space->name(), reduce_memory, candidate_count,
732  total_live_bytes / KB);
733  }
734 }
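// A minimal standalone sketch of the per-page test applied in the selection
// loop above, assuming pages were already sorted by ascending live bytes; the
// function name and parameters are illustrative.
static inline bool ExampleQualifiesForEvacuation(
    size_t area_size, size_t live_bytes, size_t total_live_bytes,
    int target_fragmentation_percent, size_t max_evacuated_bytes) {
  const size_t free_bytes = area_size - live_bytes;
  const size_t free_bytes_threshold =
      target_fragmentation_percent * (area_size / 100);
  // Take the page while it is fragmented enough and the global quota of bytes
  // to move has not been exhausted yet.
  return free_bytes >= free_bytes_threshold &&
         total_live_bytes + live_bytes <= max_evacuated_bytes;
}
// E.g. with a 500 KB area, a 70% target and a 4 MB quota, a page carrying
// 100 KB of live data (about 80% free) is selected until ~4 MB of live bytes
// have been scheduled for evacuation.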
735 
736 
737 void MarkCompactCollector::AbortCompaction() {
738  if (compacting_) {
739  RememberedSet<OLD_TO_OLD>::ClearAll(heap());
740  for (Page* p : evacuation_candidates_) {
741  p->ClearEvacuationCandidate();
742  }
743  compacting_ = false;
744  evacuation_candidates_.clear();
745  }
746  DCHECK(evacuation_candidates_.empty());
747 }
748 
749 
750 void MarkCompactCollector::Prepare() {
751  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
752 
753 #ifdef DEBUG
754  DCHECK(state_ == IDLE);
755  state_ = PREPARE_GC;
756 #endif
757 
758  DCHECK(!FLAG_never_compact || !FLAG_always_compact);
759 
760  // Instead of waiting we could also abort the sweeper threads here.
761  EnsureSweepingCompleted();
762 
763  if (heap()->incremental_marking()->IsSweeping()) {
764  heap()->incremental_marking()->Stop();
765  }
766 
767  heap()->memory_allocator()->unmapper()->PrepareForMarkCompact();
768 
769  if (!was_marked_incrementally_) {
770  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_PROLOGUE);
771  heap_->local_embedder_heap_tracer()->TracePrologue();
772  }
773 
774  // Don't start compaction if we are in the middle of an incremental
775  // marking cycle. We did not collect any slots.
776  if (!FLAG_never_compact && !was_marked_incrementally_) {
777  StartCompaction();
778  }
779 
780  PagedSpaces spaces(heap());
781  for (PagedSpace* space = spaces.next(); space != nullptr;
782  space = spaces.next()) {
783  space->PrepareForMarkCompact();
784  }
785  heap()->account_external_memory_concurrently_freed();
786 
787 #ifdef VERIFY_HEAP
788  if (!was_marked_incrementally_ && FLAG_verify_heap) {
789  VerifyMarkbitsAreClean();
790  }
791 #endif
792 }
793 
794 void MarkCompactCollector::FinishConcurrentMarking(
795  ConcurrentMarking::StopRequest stop_request) {
796  // FinishConcurrentMarking is called for both concurrent and parallel
797  // marking. It is safe to call this function when tasks are already finished.
798  if (FLAG_parallel_marking || FLAG_concurrent_marking) {
799  heap()->concurrent_marking()->Stop(stop_request);
800  heap()->concurrent_marking()->FlushLiveBytes(non_atomic_marking_state());
801  }
802 }
803 
804 void MarkCompactCollector::VerifyMarking() {
805  CHECK(marking_worklist()->IsEmpty());
806  DCHECK(heap_->incremental_marking()->IsStopped());
807 #ifdef VERIFY_HEAP
808  if (FLAG_verify_heap) {
809  FullMarkingVerifier verifier(heap());
810  verifier.Run();
811  }
812 #endif
813 #ifdef VERIFY_HEAP
814  if (FLAG_verify_heap) {
815  heap()->old_space()->VerifyLiveBytes();
816  heap()->map_space()->VerifyLiveBytes();
817  heap()->code_space()->VerifyLiveBytes();
818  }
819 #endif
820 }
821 
822 void MarkCompactCollector::Finish() {
823  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH);
824 
825 #ifdef DEBUG
826  heap()->VerifyCountersBeforeConcurrentSweeping();
827 #endif
828 
829  CHECK(weak_objects_.current_ephemerons.IsEmpty());
830  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
831  weak_objects_.next_ephemerons.Clear();
832 
833  sweeper()->StartSweeperTasks();
834  sweeper()->StartIterabilityTasks();
835 
836  // Clear the marking state of live large objects.
837  heap_->lo_space()->ClearMarkingStateOfLiveObjects();
838  heap_->code_lo_space()->ClearMarkingStateOfLiveObjects();
839 
840 #ifdef DEBUG
841  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
842  state_ = IDLE;
843 #endif
844  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
845 
846  // The stub caches are not traversed during GC; clear them to force
847  // their lazy re-initialization. This must be done after the
848  // GC, because it relies on the new address of certain old space
849  // objects (empty string, illegal builtin).
850  isolate()->load_stub_cache()->Clear();
851  isolate()->store_stub_cache()->Clear();
852 
853  if (have_code_to_deoptimize_) {
854  // Some code objects were marked for deoptimization during the GC.
855  Deoptimizer::DeoptimizeMarkedCode(isolate());
856  have_code_to_deoptimize_ = false;
857  }
858 }
859 
860 class MarkCompactCollector::RootMarkingVisitor final : public RootVisitor {
861  public:
862  explicit RootMarkingVisitor(MarkCompactCollector* collector)
863  : collector_(collector) {}
864 
865  void VisitRootPointer(Root root, const char* description,
866  ObjectSlot p) final {
867  MarkObjectByPointer(root, p);
868  }
869 
870  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
871  ObjectSlot end) final {
872  for (ObjectSlot p = start; p < end; ++p) MarkObjectByPointer(root, p);
873  }
874 
875  private:
876  V8_INLINE void MarkObjectByPointer(Root root, ObjectSlot p) {
877  if (!(*p)->IsHeapObject()) return;
878 
879  collector_->MarkRootObject(root, HeapObject::cast(*p));
880  }
881 
882  MarkCompactCollector* const collector_;
883 };
884 
885 // This visitor is used to visit the body of special objects held alive by
886 // other roots.
887 //
888 // It is currently used for
889 // - Code held alive by the top optimized frame. This code cannot be deoptimized
890 // and thus has to be kept alive in an isolated way, i.e., it should not keep
891 // alive other code objects reachable through the weak list, but it should
892 // keep alive its embedded pointers (which would otherwise be dropped).
893 // - Prefix of the string table.
894 class MarkCompactCollector::CustomRootBodyMarkingVisitor final
895  : public ObjectVisitor {
896  public:
897  explicit CustomRootBodyMarkingVisitor(MarkCompactCollector* collector)
898  : collector_(collector) {}
899 
900  void VisitPointer(HeapObject* host, ObjectSlot p) final {
901  MarkObject(host, *p);
902  }
903 
904  void VisitPointers(HeapObject* host, ObjectSlot start, ObjectSlot end) final {
905  for (ObjectSlot p = start; p < end; ++p) {
906  DCHECK(!HasWeakHeapObjectTag(*p));
907  MarkObject(host, *p);
908  }
909  }
910 
911  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
912  MaybeObjectSlot end) final {
913  // At the moment, custom roots cannot contain weak pointers.
914  UNREACHABLE();
915  }
916 
917  // VisitEmbedderPointer is defined by ObjectVisitor to call VisitPointers.
918 
919  private:
920  void MarkObject(HeapObject* host, Object* object) {
921  if (!object->IsHeapObject()) return;
922  collector_->MarkObject(host, HeapObject::cast(object));
923  }
924 
925  MarkCompactCollector* const collector_;
926 };
927 
928 class InternalizedStringTableCleaner : public ObjectVisitor {
929  public:
930  InternalizedStringTableCleaner(Heap* heap, HeapObject* table)
931  : heap_(heap), pointers_removed_(0), table_(table) {}
932 
933  void VisitPointers(HeapObject* host, ObjectSlot start,
934  ObjectSlot end) override {
935  // Visit all HeapObject pointers in [start, end).
936  Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
937  MarkCompactCollector::NonAtomicMarkingState* marking_state =
938  heap_->mark_compact_collector()->non_atomic_marking_state();
939  for (ObjectSlot p = start; p < end; ++p) {
940  Object* o = *p;
941  if (o->IsHeapObject()) {
942  HeapObject* heap_object = HeapObject::cast(o);
943  if (marking_state->IsWhite(heap_object)) {
944  pointers_removed_++;
945  // Set the entry to the_hole_value (as deleted).
946  p.store(the_hole);
947  } else {
948  // StringTable contains only old space strings.
949  DCHECK(!Heap::InNewSpace(o));
950  MarkCompactCollector::RecordSlot(table_, p, heap_object);
951  }
952  }
953  }
954  }
955 
956  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
957  MaybeObjectSlot end) final {
958  UNREACHABLE();
959  }
960 
961  int PointersRemoved() {
962  return pointers_removed_;
963  }
964 
965  private:
966  Heap* heap_;
967  int pointers_removed_;
968  HeapObject* table_;
969 };
970 
971 class ExternalStringTableCleaner : public RootVisitor {
972  public:
973  explicit ExternalStringTableCleaner(Heap* heap) : heap_(heap) {}
974 
975  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
976  ObjectSlot end) override {
977  // Visit all HeapObject pointers in [start, end).
978  MarkCompactCollector::NonAtomicMarkingState* marking_state =
979  heap_->mark_compact_collector()->non_atomic_marking_state();
980  Object* the_hole = ReadOnlyRoots(heap_).the_hole_value();
981  for (ObjectSlot p = start; p < end; ++p) {
982  Object* o = *p;
983  if (o->IsHeapObject()) {
984  HeapObject* heap_object = HeapObject::cast(o);
985  if (marking_state->IsWhite(heap_object)) {
986  if (o->IsExternalString()) {
987  heap_->FinalizeExternalString(String::cast(o));
988  } else {
989  // The original external string may have been internalized.
990  DCHECK(o->IsThinString());
991  }
992  // Set the entry to the_hole_value (as deleted).
993  p.store(the_hole);
994  }
995  }
996  }
997  }
998 
999  private:
1000  Heap* heap_;
1001 };
1002 
1003 // Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
1004 // are retained.
1005 class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
1006  public:
1007  explicit MarkCompactWeakObjectRetainer(
1008  MarkCompactCollector::NonAtomicMarkingState* marking_state)
1009  : marking_state_(marking_state) {}
1010 
1011  Object* RetainAs(Object* object) override {
1012  HeapObject* heap_object = HeapObject::cast(object);
1013  DCHECK(!marking_state_->IsGrey(heap_object));
1014  if (marking_state_->IsBlack(heap_object)) {
1015  return object;
1016  } else if (object->IsAllocationSite() &&
1017  !(AllocationSite::cast(object)->IsZombie())) {
1018  // "dead" AllocationSites need to live long enough for a traversal of new
1019  // space. These sites get a one-time reprieve.
1020 
1021  Object* nested = object;
1022  while (nested->IsAllocationSite()) {
1023  AllocationSite* current_site = AllocationSite::cast(nested);
1024  // MarkZombie will override the nested_site, read it first before
1025  // marking
1026  nested = current_site->nested_site();
1027  current_site->MarkZombie();
1028  marking_state_->WhiteToBlack(current_site);
1029  }
1030 
1031  return object;
1032  } else {
1033  return nullptr;
1034  }
1035  }
1036 
1037  private:
1038  MarkCompactCollector::NonAtomicMarkingState* marking_state_;
1039 };
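// A minimal sketch of the retention rule above: marked objects survive,
// unmarked AllocationSites get exactly one extra cycle as zombies, and
// everything else is dropped from the weak list. Names are illustrative.
static inline bool ExampleRetainInWeakList(bool is_black,
                                           bool is_allocation_site,
                                           bool is_zombie) {
  if (is_black) return true;                          // reachable: keep
  if (is_allocation_site && !is_zombie) return true;  // one-time reprieve
  return false;                                       // unlink from the list
}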
1040 
1041 class RecordMigratedSlotVisitor : public ObjectVisitor {
1042  public:
1043  explicit RecordMigratedSlotVisitor(MarkCompactCollector* collector)
1044  : collector_(collector) {}
1045 
1046  inline void VisitPointer(HeapObject* host, ObjectSlot p) final {
1047  DCHECK(!HasWeakHeapObjectTag(*p));
1048  RecordMigratedSlot(host, MaybeObject::FromObject(*p), p.address());
1049  }
1050 
1051  inline void VisitPointer(HeapObject* host, MaybeObjectSlot p) final {
1052  RecordMigratedSlot(host, *p, p.address());
1053  }
1054 
1055  inline void VisitPointers(HeapObject* host, ObjectSlot start,
1056  ObjectSlot end) final {
1057  while (start < end) {
1058  VisitPointer(host, start);
1059  ++start;
1060  }
1061  }
1062 
1063  inline void VisitPointers(HeapObject* host, MaybeObjectSlot start,
1064  MaybeObjectSlot end) final {
1065  while (start < end) {
1066  VisitPointer(host, start);
1067  ++start;
1068  }
1069  }
1070 
1071  inline void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
1072  DCHECK_EQ(host, rinfo->host());
1073  DCHECK(RelocInfo::IsCodeTargetMode(rinfo->rmode()));
1074  Code target = Code::GetCodeFromTargetAddress(rinfo->target_address());
1075  // The target is always in old space, so we don't have to record the slot in
1076  // the old-to-new remembered set.
1077  DCHECK(!Heap::InNewSpace(target));
1078  collector_->RecordRelocSlot(host, rinfo, target);
1079  }
1080 
1081  inline void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
1082  DCHECK_EQ(host, rinfo->host());
1083  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
1084  HeapObject* object = HeapObject::cast(rinfo->target_object());
1085  GenerationalBarrierForCode(host, rinfo, object);
1086  collector_->RecordRelocSlot(host, rinfo, object);
1087  }
1088 
1089  // Entries that are skipped for recording.
1090  inline void VisitExternalReference(Code host, RelocInfo* rinfo) final {}
1091  inline void VisitExternalReference(Foreign* host, Address* p) final {}
1092  inline void VisitRuntimeEntry(Code host, RelocInfo* rinfo) final {}
1093  inline void VisitInternalReference(Code host, RelocInfo* rinfo) final {}
1094 
1095  protected:
1096  inline virtual void RecordMigratedSlot(HeapObject* host, MaybeObject value,
1097  Address slot) {
1098  if (value->IsStrongOrWeak()) {
1099  Page* p = Page::FromAddress(value.ptr());
1100  if (p->InNewSpace()) {
1101  DCHECK_IMPLIES(p->InToSpace(),
1102  p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
1103  RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
1104  Page::FromAddress(slot), slot);
1105  } else if (p->IsEvacuationCandidate()) {
1106  RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
1107  Page::FromAddress(slot), slot);
1108  }
1109  }
1110  }
1111 
1112  MarkCompactCollector* collector_;
1113 };
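// A minimal sketch of the decision RecordMigratedSlot makes above, reduced to
// booleans; the enum and function are illustrative assumptions.
enum class ExampleRememberedSet { kNone, kOldToNew, kOldToOld };
static inline ExampleRememberedSet ExampleSetForMigratedSlot(
    bool value_is_strong_or_weak, bool value_in_new_space,
    bool value_on_evacuation_candidate) {
  if (!value_is_strong_or_weak) return ExampleRememberedSet::kNone;  // Smi etc.
  // Slots pointing into new space must survive the next scavenge; slots
  // pointing at evacuation candidates must be rewritten after compaction.
  if (value_in_new_space) return ExampleRememberedSet::kOldToNew;
  if (value_on_evacuation_candidate) return ExampleRememberedSet::kOldToOld;
  return ExampleRememberedSet::kNone;
}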
1114 
1115 class MigrationObserver {
1116  public:
1117  explicit MigrationObserver(Heap* heap) : heap_(heap) {}
1118 
1119  virtual ~MigrationObserver() = default;
1120  virtual void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1121  int size) = 0;
1122 
1123  protected:
1124  Heap* heap_;
1125 };
1126 
1127 class ProfilingMigrationObserver final : public MigrationObserver {
1128  public:
1129  explicit ProfilingMigrationObserver(Heap* heap) : MigrationObserver(heap) {}
1130 
1131  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
1132  int size) final {
1133  if (dest == CODE_SPACE || (dest == OLD_SPACE && dst->IsBytecodeArray())) {
1134  PROFILE(heap_->isolate(),
1135  CodeMoveEvent(AbstractCode::cast(src), AbstractCode::cast(dst)));
1136  }
1137  heap_->OnMoveEvent(dst, src, size);
1138  }
1139 };
1140 
1141 class HeapObjectVisitor {
1142  public:
1143  virtual ~HeapObjectVisitor() = default;
1144  virtual bool Visit(HeapObject* object, int size) = 0;
1145 };
1146 
1147 class EvacuateVisitorBase : public HeapObjectVisitor {
1148  public:
1149  void AddObserver(MigrationObserver* observer) {
1150  migration_function_ = RawMigrateObject<MigrationMode::kObserved>;
1151  observers_.push_back(observer);
1152  }
1153 
1154  protected:
1155  enum MigrationMode { kFast, kObserved };
1156 
1157  typedef void (*MigrateFunction)(EvacuateVisitorBase* base, HeapObject* dst,
1158  HeapObject* src, int size,
1159  AllocationSpace dest);
1160 
1161  template <MigrationMode mode>
1162  static void RawMigrateObject(EvacuateVisitorBase* base, HeapObject* dst,
1163  HeapObject* src, int size,
1164  AllocationSpace dest) {
1165  Address dst_addr = dst->address();
1166  Address src_addr = src->address();
1167  DCHECK(base->heap_->AllowedToBeMigrated(src, dest));
1168  DCHECK_NE(dest, LO_SPACE);
1169  DCHECK_NE(dest, CODE_LO_SPACE);
1170  if (dest == OLD_SPACE) {
1171  DCHECK_OBJECT_SIZE(size);
1172  DCHECK(IsAligned(size, kPointerSize));
1173  base->heap_->CopyBlock(dst_addr, src_addr, size);
1174  if (mode != MigrationMode::kFast)
1175  base->ExecuteMigrationObservers(dest, src, dst, size);
1176  dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1177  } else if (dest == CODE_SPACE) {
1178  DCHECK_CODEOBJECT_SIZE(size, base->heap_->code_space());
1179  base->heap_->CopyBlock(dst_addr, src_addr, size);
1180  Code::cast(dst)->Relocate(dst_addr - src_addr);
1181  if (mode != MigrationMode::kFast)
1182  base->ExecuteMigrationObservers(dest, src, dst, size);
1183  dst->IterateBodyFast(dst->map(), size, base->record_visitor_);
1184  } else {
1185  DCHECK_OBJECT_SIZE(size);
1186  DCHECK(dest == NEW_SPACE);
1187  base->heap_->CopyBlock(dst_addr, src_addr, size);
1188  if (mode != MigrationMode::kFast)
1189  base->ExecuteMigrationObservers(dest, src, dst, size);
1190  }
1191  base::Relaxed_Store(reinterpret_cast<base::AtomicWord*>(src_addr),
1192  static_cast<base::AtomicWord>(dst_addr));
1193  }
1194 
1195  EvacuateVisitorBase(Heap* heap, LocalAllocator* local_allocator,
1196  RecordMigratedSlotVisitor* record_visitor)
1197  : heap_(heap),
1198  local_allocator_(local_allocator),
1199  record_visitor_(record_visitor) {
1200  migration_function_ = RawMigrateObject<MigrationMode::kFast>;
1201  }
1202 
1203  inline bool TryEvacuateObject(AllocationSpace target_space,
1204  HeapObject* object, int size,
1205  HeapObject** target_object) {
1206 #ifdef VERIFY_HEAP
1207  if (AbortCompactionForTesting(object)) return false;
1208 #endif // VERIFY_HEAP
1209  AllocationAlignment alignment =
1210  HeapObject::RequiredAlignment(object->map());
1211  AllocationResult allocation =
1212  local_allocator_->Allocate(target_space, size, alignment);
1213  if (allocation.To(target_object)) {
1214  MigrateObject(*target_object, object, size, target_space);
1215  return true;
1216  }
1217  return false;
1218  }
1219 
1220  inline void ExecuteMigrationObservers(AllocationSpace dest, HeapObject* src,
1221  HeapObject* dst, int size) {
1222  for (MigrationObserver* obs : observers_) {
1223  obs->Move(dest, src, dst, size);
1224  }
1225  }
1226 
1227  inline void MigrateObject(HeapObject* dst, HeapObject* src, int size,
1228  AllocationSpace dest) {
1229  migration_function_(this, dst, src, size, dest);
1230  }
1231 
1232 #ifdef VERIFY_HEAP
1233  bool AbortCompactionForTesting(HeapObject* object) {
1234  if (FLAG_stress_compaction) {
1235  const uintptr_t mask = static_cast<uintptr_t>(FLAG_random_seed) &
1236  kPageAlignmentMask & ~kObjectAlignmentMask;
1237  if ((object->address() & kPageAlignmentMask) == mask) {
1238  Page* page = Page::FromAddress(object->address());
1239  if (page->IsFlagSet(Page::COMPACTION_WAS_ABORTED_FOR_TESTING)) {
1240  page->ClearFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1241  } else {
1242  page->SetFlag(Page::COMPACTION_WAS_ABORTED_FOR_TESTING);
1243  return true;
1244  }
1245  }
1246  }
1247  return false;
1248  }
1249 #endif // VERIFY_HEAP
1250 
1251  Heap* heap_;
1252  LocalAllocator* local_allocator_;
1253  RecordMigratedSlotVisitor* record_visitor_;
1254  std::vector<MigrationObserver*> observers_;
1255  MigrateFunction migration_function_;
1256 };
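// A minimal sketch of the forwarding step at the end of RawMigrateObject
// above: after the payload is copied, the first word of the old object is
// overwritten with the new address so that later visitors can follow it.
// Plain word arrays stand in for V8's tagged heap; purely illustrative.
static inline void ExampleMigrate(uintptr_t* dst, uintptr_t* src,
                                  size_t words) {
  for (size_t i = 0; i < words; i++) dst[i] = src[i];  // CopyBlock equivalent
  src[0] = reinterpret_cast<uintptr_t>(dst);  // leave a forwarding address
}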
1257 
1258 class EvacuateNewSpaceVisitor final : public EvacuateVisitorBase {
1259  public:
1260  explicit EvacuateNewSpaceVisitor(
1261  Heap* heap, LocalAllocator* local_allocator,
1262  RecordMigratedSlotVisitor* record_visitor,
1263  Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1264  : EvacuateVisitorBase(heap, local_allocator, record_visitor),
1265  buffer_(LocalAllocationBuffer::InvalidBuffer()),
1266  promoted_size_(0),
1267  semispace_copied_size_(0),
1268  local_pretenuring_feedback_(local_pretenuring_feedback),
1269  is_incremental_marking_(heap->incremental_marking()->IsMarking()) {}
1270 
1271  inline bool Visit(HeapObject* object, int size) override {
1272  if (TryEvacuateWithoutCopy(object)) return true;
1273  HeapObject* target_object = nullptr;
1274  if (heap_->ShouldBePromoted(object->address()) &&
1275  TryEvacuateObject(OLD_SPACE, object, size, &target_object)) {
1276  promoted_size_ += size;
1277  return true;
1278  }
1279  heap_->UpdateAllocationSite(object->map(), object,
1280  local_pretenuring_feedback_);
1281  HeapObject* target = nullptr;
1282  AllocationSpace space = AllocateTargetObject(object, size, &target);
1283  MigrateObject(HeapObject::cast(target), object, size, space);
1284  semispace_copied_size_ += size;
1285  return true;
1286  }
1287 
1288  intptr_t promoted_size() { return promoted_size_; }
1289  intptr_t semispace_copied_size() { return semispace_copied_size_; }
1290 
1291  private:
1292  inline bool TryEvacuateWithoutCopy(HeapObject* object) {
1293  if (is_incremental_marking_) return false;
1294 
1295  Map map = object->map();
1296 
1297  // Some objects can be evacuated without creating a copy.
1298  if (map->visitor_id() == kVisitThinString) {
1299  HeapObject* actual = ThinString::cast(object)->unchecked_actual();
1300  if (MarkCompactCollector::IsOnEvacuationCandidate(actual)) return false;
1301  object->map_slot().Relaxed_Store(
1302  MapWord::FromForwardingAddress(actual).ToMap());
1303  return true;
1304  }
1305  // TODO(mlippautz): Handle ConsString.
1306 
1307  return false;
1308  }
1309 
1310  inline AllocationSpace AllocateTargetObject(HeapObject* old_object, int size,
1311  HeapObject** target_object) {
1312  AllocationAlignment alignment =
1313  HeapObject::RequiredAlignment(old_object->map());
1314  AllocationSpace space_allocated_in = NEW_SPACE;
1315  AllocationResult allocation =
1316  local_allocator_->Allocate(NEW_SPACE, size, alignment);
1317  if (allocation.IsRetry()) {
1318  allocation = AllocateInOldSpace(size, alignment);
1319  space_allocated_in = OLD_SPACE;
1320  }
1321  bool ok = allocation.To(target_object);
1322  DCHECK(ok);
1323  USE(ok);
1324  return space_allocated_in;
1325  }
1326 
1327  inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1328  AllocationAlignment alignment) {
1329  AllocationResult allocation =
1330  local_allocator_->Allocate(OLD_SPACE, size_in_bytes, alignment);
1331  if (allocation.IsRetry()) {
1332  heap_->FatalProcessOutOfMemory(
1333  "MarkCompactCollector: semi-space copy, fallback in old gen");
1334  }
1335  return allocation;
1336  }
1337 
1338  LocalAllocationBuffer buffer_;
1339  intptr_t promoted_size_;
1340  intptr_t semispace_copied_size_;
1341  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1342  bool is_incremental_marking_;
1343 };
1344 
1345 template <PageEvacuationMode mode>
1346 class EvacuateNewSpacePageVisitor final : public HeapObjectVisitor {
1347  public:
1348  explicit EvacuateNewSpacePageVisitor(
1349  Heap* heap, RecordMigratedSlotVisitor* record_visitor,
1350  Heap::PretenuringFeedbackMap* local_pretenuring_feedback)
1351  : heap_(heap),
1352  record_visitor_(record_visitor),
1353  moved_bytes_(0),
1354  local_pretenuring_feedback_(local_pretenuring_feedback) {}
1355 
1356  static void Move(Page* page) {
1357  switch (mode) {
1358  case NEW_TO_NEW:
1359  page->heap()->new_space()->MovePageFromSpaceToSpace(page);
1360  page->SetFlag(Page::PAGE_NEW_NEW_PROMOTION);
1361  break;
1362  case NEW_TO_OLD: {
1363  page->heap()->new_space()->from_space().RemovePage(page);
1364  Page* new_page = Page::ConvertNewToOld(page);
1365  DCHECK(!new_page->InNewSpace());
1366  new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
1367  break;
1368  }
1369  }
1370  }
1371 
1372  inline bool Visit(HeapObject* object, int size) override {
1373  if (mode == NEW_TO_NEW) {
1374  heap_->UpdateAllocationSite(object->map(), object,
1375  local_pretenuring_feedback_);
1376  } else if (mode == NEW_TO_OLD) {
1377  object->IterateBodyFast(record_visitor_);
1378  }
1379  return true;
1380  }
1381 
1382  intptr_t moved_bytes() { return moved_bytes_; }
1383  void account_moved_bytes(intptr_t bytes) { moved_bytes_ += bytes; }
1384 
1385  private:
1386  Heap* heap_;
1387  RecordMigratedSlotVisitor* record_visitor_;
1388  intptr_t moved_bytes_;
1389  Heap::PretenuringFeedbackMap* local_pretenuring_feedback_;
1390 };
1391 
1392 class EvacuateOldSpaceVisitor final : public EvacuateVisitorBase {
1393  public:
1394  EvacuateOldSpaceVisitor(Heap* heap, LocalAllocator* local_allocator,
1395  RecordMigratedSlotVisitor* record_visitor)
1396  : EvacuateVisitorBase(heap, local_allocator, record_visitor) {}
1397 
1398  inline bool Visit(HeapObject* object, int size) override {
1399  HeapObject* target_object = nullptr;
1400  if (TryEvacuateObject(
1401  Page::FromAddress(object->address())->owner()->identity(), object,
1402  size, &target_object)) {
1403  DCHECK(object->map_word().IsForwardingAddress());
1404  return true;
1405  }
1406  return false;
1407  }
1408 };
1409 
1410 class EvacuateRecordOnlyVisitor final : public HeapObjectVisitor {
1411  public:
1412  explicit EvacuateRecordOnlyVisitor(Heap* heap) : heap_(heap) {}
1413 
1414  inline bool Visit(HeapObject* object, int size) override {
1415  RecordMigratedSlotVisitor visitor(heap_->mark_compact_collector());
1416  object->IterateBodyFast(&visitor);
1417  return true;
1418  }
1419 
1420  private:
1421  Heap* heap_;
1422 };
1423 
1424 bool MarkCompactCollector::IsUnmarkedHeapObject(Heap* heap, ObjectSlot p) {
1425  Object* o = *p;
1426  if (!o->IsHeapObject()) return false;
1427  HeapObject* heap_object = HeapObject::cast(o);
1428  return heap->mark_compact_collector()->non_atomic_marking_state()->IsWhite(
1429  heap_object);
1430 }
1431 
1432 void MarkCompactCollector::MarkStringTable(
1433  ObjectVisitor* custom_root_body_visitor) {
1434  StringTable string_table = heap()->string_table();
1435  // Mark the string table itself.
1436  if (marking_state()->WhiteToBlack(string_table)) {
1437  // Explicitly mark the prefix.
1438  string_table->IteratePrefix(custom_root_body_visitor);
1439  }
1440 }
1441 
1442 void MarkCompactCollector::MarkRoots(RootVisitor* root_visitor,
1443  ObjectVisitor* custom_root_body_visitor) {
1444  // Mark the heap roots including global variables, stack variables,
1445  // etc., and all objects reachable from them.
1446  heap()->IterateStrongRoots(root_visitor, VISIT_ONLY_STRONG);
1447 
1448  // Custom marking for string table and top optimized frame.
1449  MarkStringTable(custom_root_body_visitor);
1450  ProcessTopOptimizedFrame(custom_root_body_visitor);
1451 }
1452 
1453 void MarkCompactCollector::ProcessEphemeronsUntilFixpoint() {
1454  bool work_to_do = true;
1455  int iterations = 0;
1456  int max_iterations = FLAG_ephemeron_fixpoint_iterations;
1457 
1458  while (work_to_do) {
1459  PerformWrapperTracing();
1460 
1461  if (iterations >= max_iterations) {
1462  // Give up fixpoint iteration and switch to linear algorithm.
1463  ProcessEphemeronsLinear();
1464  break;
1465  }
1466 
1467  // Move ephemerons from next_ephemerons into current_ephemerons to
1468  // drain them in this iteration.
1469  weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1470  heap()->concurrent_marking()->set_ephemeron_marked(false);
1471 
1472  {
1473  TRACE_GC(heap()->tracer(),
1474  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1475 
1476  if (FLAG_parallel_marking) {
1477  heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1478  }
1479 
1480  work_to_do = ProcessEphemerons();
1481  FinishConcurrentMarking(
1482  ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1483  }
1484 
1485  CHECK(weak_objects_.current_ephemerons.IsEmpty());
1486  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1487 
1488  work_to_do = work_to_do || !marking_worklist()->IsEmpty() ||
1489  heap()->concurrent_marking()->ephemeron_marked() ||
1490  !marking_worklist()->IsEmbedderEmpty() ||
1491  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1492  ++iterations;
1493  }
1494 
1495  CHECK(marking_worklist()->IsEmpty());
1496  CHECK(weak_objects_.current_ephemerons.IsEmpty());
1497  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1498 }
1499 
1500 bool MarkCompactCollector::ProcessEphemerons() {
1501  Ephemeron ephemeron;
1502  bool ephemeron_marked = false;
1503 
1504  // Drain current_ephemerons and push ephemerons where key and value are still
1505  // unreachable into next_ephemerons.
1506  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1507  if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1508  ephemeron_marked = true;
1509  }
1510  }
1511 
1512  // Drain marking worklist and push discovered ephemerons into
1513  // discovered_ephemerons.
1514  ProcessMarkingWorklist();
1515 
1516  // Drain discovered_ephemerons (filled while draining the marking worklist
1517  // above) and push ephemerons where key and value are still unreachable into
1518  // next_ephemerons.
1519  while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1520  if (VisitEphemeron(ephemeron.key, ephemeron.value)) {
1521  ephemeron_marked = true;
1522  }
1523  }
1524 
1525  // Flush local ephemerons for main task to global pool.
1526  weak_objects_.ephemeron_hash_tables.FlushToGlobal(kMainThread);
1527  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1528 
1529  return ephemeron_marked;
1530 }
1531 
1532 void MarkCompactCollector::ProcessEphemeronsLinear() {
1533  TRACE_GC(heap()->tracer(),
1534  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_LINEAR);
1535  CHECK(heap()->concurrent_marking()->IsStopped());
1536  std::unordered_multimap<HeapObject*, HeapObject*> key_to_values;
1537  Ephemeron ephemeron;
1538 
1539  DCHECK(weak_objects_.current_ephemerons.IsEmpty());
1540  weak_objects_.current_ephemerons.Swap(weak_objects_.next_ephemerons);
1541 
1542  while (weak_objects_.current_ephemerons.Pop(kMainThread, &ephemeron)) {
1543  VisitEphemeron(ephemeron.key, ephemeron.value);
1544 
1545  if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1546  key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1547  }
1548  }
1549 
1550  ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1551  bool work_to_do = true;
1552 
1553  while (work_to_do) {
1554  PerformWrapperTracing();
1555 
1556  ResetNewlyDiscovered();
1557  ephemeron_marking_.newly_discovered_limit = key_to_values.size();
1558 
1559  {
1560  TRACE_GC(heap()->tracer(),
1561  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON_MARKING);
1562  // Drain marking worklist and push all discovered objects into
1563  // newly_discovered.
1564  ProcessMarkingWorklistInternal<
1565  MarkCompactCollector::MarkingWorklistProcessingMode::
1566  kTrackNewlyDiscoveredObjects>();
1567  }
1568 
1569  while (weak_objects_.discovered_ephemerons.Pop(kMainThread, &ephemeron)) {
1570  VisitEphemeron(ephemeron.key, ephemeron.value);
1571 
1572  if (non_atomic_marking_state()->IsWhite(ephemeron.value)) {
1573  key_to_values.insert(std::make_pair(ephemeron.key, ephemeron.value));
1574  }
1575  }
1576 
1577  if (ephemeron_marking_.newly_discovered_overflowed) {
1578  // If newly_discovered was overflowed just visit all ephemerons in
1579  // next_ephemerons.
1580  weak_objects_.next_ephemerons.Iterate([&](Ephemeron ephemeron) {
1581  if (non_atomic_marking_state()->IsBlackOrGrey(ephemeron.key) &&
1582  non_atomic_marking_state()->WhiteToGrey(ephemeron.value)) {
1583  marking_worklist()->Push(ephemeron.value);
1584  }
1585  });
1586 
1587  } else {
1588  // This is the good case: newly_discovered stores all discovered
1589  // objects. Now use key_to_values to see if discovered objects keep more
1590  // objects alive due to ephemeron semantics.
1591  for (HeapObject* object : ephemeron_marking_.newly_discovered) {
1592  auto range = key_to_values.equal_range(object);
1593  for (auto it = range.first; it != range.second; ++it) {
1594  HeapObject* value = it->second;
1595  MarkObject(object, value);
1596  }
1597  }
1598  }
1599 
1600  // Do NOT drain marking worklist here, otherwise the current checks
1601  // for work_to_do are not sufficient for determining if another iteration
1602  // is necessary.
1603 
1604  work_to_do = !marking_worklist()->IsEmpty() ||
1605  !heap()->local_embedder_heap_tracer()->IsRemoteTracingDone();
1606  CHECK(weak_objects_.discovered_ephemerons.IsEmpty());
1607  }
1608 
1609  ResetNewlyDiscovered();
1610  ephemeron_marking_.newly_discovered.shrink_to_fit();
1611 
1612  CHECK(marking_worklist()->IsEmpty());
1613 }
1614 
1615 void MarkCompactCollector::PerformWrapperTracing() {
1616  if (heap_->local_embedder_heap_tracer()->InUse()) {
1617  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_EMBEDDER_TRACING);
1618  {
1619  LocalEmbedderHeapTracer::ProcessingScope scope(
1620  heap_->local_embedder_heap_tracer());
1621  HeapObject* object;
1622  while (marking_worklist()->embedder()->Pop(kMainThread, &object)) {
1623  scope.TracePossibleWrapper(JSObject::cast(object));
1624  }
1625  }
1626  heap_->local_embedder_heap_tracer()->Trace(
1627  std::numeric_limits<double>::infinity());
1628  }
1629 }
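// Wrapper tracing hands every JSObject on the embedder worklist to the
// embedder's heap tracer and then lets the tracer run without a deadline
// (infinity), so references held on the embedder side are folded into the
// same marking fixpoint as the V8 heap.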
1630 
1631 void MarkCompactCollector::ProcessMarkingWorklist() {
1632  ProcessMarkingWorklistInternal<
1633  MarkCompactCollector::MarkingWorklistProcessingMode::kDefault>();
1634 }
1635 
1636 template <MarkCompactCollector::MarkingWorklistProcessingMode mode>
1637 void MarkCompactCollector::ProcessMarkingWorklistInternal() {
1638  HeapObject* object;
1639  MarkCompactMarkingVisitor visitor(this, marking_state());
1640  while ((object = marking_worklist()->Pop()) != nullptr) {
1641  DCHECK(!object->IsFiller());
1642  DCHECK(object->IsHeapObject());
1643  DCHECK(heap()->Contains(object));
1644  DCHECK(!(marking_state()->IsWhite(object)));
1645  marking_state()->GreyToBlack(object);
1646  if (mode == MarkCompactCollector::MarkingWorklistProcessingMode::
1647  kTrackNewlyDiscoveredObjects) {
1648  AddNewlyDiscovered(object);
1649  }
1650  Map map = object->map();
1651  MarkObject(object, map);
1652  visitor.Visit(map, object);
1653  }
1654  DCHECK(marking_worklist()->IsBailoutEmpty());
1655 }
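// The drain loop pops grey objects, turns them black, optionally records them
// when tracking newly discovered objects for the linear ephemeron algorithm,
// marks their maps, and visits their bodies so that any white referents are
// pushed back onto the worklist.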
1656 
1657 bool MarkCompactCollector::VisitEphemeron(HeapObject* key, HeapObject* value) {
1658  if (marking_state()->IsBlackOrGrey(key)) {
1659  if (marking_state()->WhiteToGrey(value)) {
1660  marking_worklist()->Push(value);
1661  return true;
1662  }
1663 
1664  } else if (marking_state()->IsWhite(value)) {
1665  weak_objects_.next_ephemerons.Push(kMainThread, Ephemeron{key, value});
1666  }
1667 
1668  return false;
1669 }
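// This encodes the ephemeron rule used by WeakMaps: a value is live only if
// its key is live. A marked key immediately greys a white value (and the
// caller learns that new marking work exists); a white key with a white value
// is deferred to next_ephemerons and re-examined in a later fixpoint round.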
1670 
1671 void MarkCompactCollector::ProcessEphemeronMarking() {
1672  DCHECK(marking_worklist()->IsEmpty());
1673 
1674  // Incremental marking might leave ephemerons in main task's local
1675  // buffer, flush it into global pool.
1676  weak_objects_.next_ephemerons.FlushToGlobal(kMainThread);
1677 
1678  ProcessEphemeronsUntilFixpoint();
1679 
1680  CHECK(marking_worklist()->IsEmpty());
1681  CHECK(heap()->local_embedder_heap_tracer()->IsRemoteTracingDone());
1682 }
1683 
1684 void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
1685  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
1686  !it.done(); it.Advance()) {
1687  if (it.frame()->type() == StackFrame::INTERPRETED) {
1688  return;
1689  }
1690  if (it.frame()->type() == StackFrame::OPTIMIZED) {
1691  Code code = it.frame()->LookupCode();
1692  if (!code->CanDeoptAt(it.frame()->pc())) {
1693  Code::BodyDescriptor::IterateBody(code->map(), code, visitor);
1694  }
1695  return;
1696  }
1697  }
1698 }
1699 
1700 void MarkCompactCollector::RecordObjectStats() {
1701  if (V8_UNLIKELY(FLAG_gc_stats)) {
1702  heap()->CreateObjectStats();
1703  ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
1704  heap()->dead_object_stats_);
1705  collector.Collect();
1706  if (V8_UNLIKELY(FLAG_gc_stats &
1707  v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
1708  std::stringstream live, dead;
1709  heap()->live_object_stats_->Dump(live);
1710  heap()->dead_object_stats_->Dump(dead);
1711  TRACE_EVENT_INSTANT2(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats"),
1712  "V8.GC_Objects_Stats", TRACE_EVENT_SCOPE_THREAD,
1713  "live", TRACE_STR_COPY(live.str().c_str()), "dead",
1714  TRACE_STR_COPY(dead.str().c_str()));
1715  }
1716  if (FLAG_trace_gc_object_stats) {
1717  heap()->live_object_stats_->PrintJSON("live");
1718  heap()->dead_object_stats_->PrintJSON("dead");
1719  }
1720  heap()->live_object_stats_->CheckpointObjectStats();
1721  heap()->dead_object_stats_->ClearObjectStats();
1722  }
1723 }
1724 
1725 void MarkCompactCollector::MarkLiveObjects() {
1726  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK);
1727  // The recursive GC marker detects when it is nearing stack overflow,
1728  // and switches to a different marking system. JS interrupts interfere
1729  // with the C stack limit check.
1730  PostponeInterruptsScope postpone(isolate());
1731 
1732  {
1733  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_FINISH_INCREMENTAL);
1734  IncrementalMarking* incremental_marking = heap_->incremental_marking();
1735  if (was_marked_incrementally_) {
1736  incremental_marking->Finalize();
1737  } else {
1738  CHECK(incremental_marking->IsStopped());
1739  }
1740  }
1741 
1742 #ifdef DEBUG
1743  DCHECK(state_ == PREPARE_GC);
1744  state_ = MARK_LIVE_OBJECTS;
1745 #endif
1746 
1747  heap_->local_embedder_heap_tracer()->EnterFinalPause();
1748 
1749  RootMarkingVisitor root_visitor(this);
1750 
1751  {
1752  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_ROOTS);
1753  CustomRootBodyMarkingVisitor custom_root_body_visitor(this);
1754  MarkRoots(&root_visitor, &custom_root_body_visitor);
1755  }
1756 
1757  {
1758  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_MAIN);
1759  if (FLAG_parallel_marking) {
1760  heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1761  }
1762  ProcessMarkingWorklist();
1763 
1764  FinishConcurrentMarking(
1765  ConcurrentMarking::StopRequest::COMPLETE_ONGOING_TASKS);
1766  ProcessMarkingWorklist();
1767  }
1768 
1769  {
1770  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE);
1771 
1772  DCHECK(marking_worklist()->IsEmpty());
1773 
1774  // Mark objects reachable through the embedder heap. This phase is
1775  // opportunistic as it may not discover graphs that are only reachable
1776  // through ephemerons.
1777  {
1778  TRACE_GC(heap()->tracer(),
1779  GCTracer::Scope::MC_MARK_EMBEDDER_TRACING_CLOSURE);
1780  do {
1781  // PerformWrapperTracing() also empties the work items collected by
1782  // concurrent markers. As a result this call needs to happen at least
1783  // once.
1784  PerformWrapperTracing();
1785  ProcessMarkingWorklist();
1786  } while (!heap_->local_embedder_heap_tracer()->IsRemoteTracingDone() ||
1787  !marking_worklist()->IsEmbedderEmpty());
1788  DCHECK(marking_worklist()->IsEmbedderEmpty());
1789  DCHECK(marking_worklist()->IsEmpty());
1790  }
1791 
1792  // The objects reachable from the roots are marked, yet unreachable objects
1793  // are unmarked. Mark objects reachable due to embedder heap tracing or
1794  // harmony weak maps.
1795  {
1796  TRACE_GC(heap()->tracer(),
1797  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_EPHEMERON);
1798  ProcessEphemeronMarking();
1799  DCHECK(marking_worklist()->IsEmpty());
1800  }
1801 
1802  // The objects reachable from the roots, weak maps, and embedder heap
1803  // tracing are marked. Objects pointed to only by weak global handles cannot
1804  // be immediately reclaimed. Instead, we have to mark them as pending and
1805  // mark objects reachable from them.
1806  //
1807  // First we identify nonlive weak handles and mark them as pending
1808  // destruction.
1809  {
1810  TRACE_GC(heap()->tracer(),
1811  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_HANDLES);
1812  heap()->isolate()->global_handles()->IdentifyWeakHandles(
1813  &IsUnmarkedHeapObject);
1814  ProcessMarkingWorklist();
1815  }
1816 
1817  // Process finalizers, effectively keeping them alive until the next
1818  // garbage collection.
1819  {
1820  TRACE_GC(heap()->tracer(),
1821  GCTracer::Scope::MC_MARK_WEAK_CLOSURE_WEAK_ROOTS);
1822  heap()->isolate()->global_handles()->IterateWeakRootsForFinalizers(
1823  &root_visitor);
1824  ProcessMarkingWorklist();
1825  }
1826 
1827  // Repeat ephemeron processing from the newly marked objects.
1828  {
1829  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_MARK_WEAK_CLOSURE_HARMONY);
1830  ProcessEphemeronMarking();
1831  DCHECK(marking_worklist()->IsEmbedderEmpty());
1832  DCHECK(marking_worklist()->IsEmpty());
1833  }
1834 
1835  {
1836  heap()->isolate()->global_handles()->IterateWeakRootsForPhantomHandles(
1837  &IsUnmarkedHeapObject);
1838  }
1839  }
1840 
1841  if (was_marked_incrementally_) {
1842  heap()->incremental_marking()->Deactivate();
1843  }
1844 }
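// Marking thus runs in a fixed order: finish or stop incremental marking,
// mark the roots, drain the worklist (with concurrent markers if enabled),
// close over embedder-traced wrappers and ephemerons, retain objects held by
// weak handles and finalizers, repeat the ephemeron pass for the newly
// retained objects, and finally clear phantom handles. Afterwards every live
// object is black, which the clearing and evacuation phases below rely on.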
1845 
1846 void MarkCompactCollector::ClearNonLiveReferences() {
1847  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR);
1848 
1849  {
1850  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_STRING_TABLE);
1851 
1852  // Prune the string table removing all strings only pointed to by the
1853  // string table. Cannot use string_table() here because the string
1854  // table is marked.
1855  StringTable string_table = heap()->string_table();
1856  InternalizedStringTableCleaner internalized_visitor(heap(), string_table);
1857  string_table->IterateElements(&internalized_visitor);
1858  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
1859 
1860  ExternalStringTableCleaner external_visitor(heap());
1861  heap()->external_string_table_.IterateAll(&external_visitor);
1862  heap()->external_string_table_.CleanUpAll();
1863  }
1864 
1865  {
1866  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_LISTS);
1867  // Process the weak references.
1868  MarkCompactWeakObjectRetainer mark_compact_object_retainer(
1869  non_atomic_marking_state());
1870  heap()->ProcessAllWeakReferences(&mark_compact_object_retainer);
1871  }
1872 
1873  {
1874  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_MAPS);
1875  // ClearFullMapTransitions must be called before weak references are
1876  // cleared.
1877  ClearFullMapTransitions();
1878  }
1879  {
1880  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
1881  ClearWeakReferences();
1882  ClearWeakCollections();
1883  ClearJSWeakCells();
1884  }
1885 
1886  MarkDependentCodeForDeoptimization();
1887 
1888  DCHECK(weak_objects_.transition_arrays.IsEmpty());
1889  DCHECK(weak_objects_.weak_references.IsEmpty());
1890  DCHECK(weak_objects_.weak_objects_in_code.IsEmpty());
1891  DCHECK(weak_objects_.js_weak_cells.IsEmpty());
1892 }
1893 
1894 void MarkCompactCollector::MarkDependentCodeForDeoptimization() {
1895  std::pair<HeapObject*, Code> weak_object_in_code;
1896  while (weak_objects_.weak_objects_in_code.Pop(kMainThread,
1897  &weak_object_in_code)) {
1898  HeapObject* object = weak_object_in_code.first;
1899  Code code = weak_object_in_code.second;
1900  if (!non_atomic_marking_state()->IsBlackOrGrey(object) &&
1901  !code->embedded_objects_cleared()) {
1902  if (!code->marked_for_deoptimization()) {
1903  code->SetMarkedForDeoptimization("weak objects");
1904  have_code_to_deoptimize_ = true;
1905  }
1906  code->ClearEmbeddedObjects(heap_);
1907  DCHECK(code->embedded_objects_cleared());
1908  }
1909  }
1910 }
1911 
1912 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map dead_target) {
1913  DCHECK(non_atomic_marking_state()->IsWhite(dead_target));
1914  Object* potential_parent = dead_target->constructor_or_backpointer();
1915  if (potential_parent->IsMap()) {
1916  Map parent = Map::cast(potential_parent);
1917  DisallowHeapAllocation no_gc_obviously;
1918  if (non_atomic_marking_state()->IsBlackOrGrey(parent) &&
1919  TransitionsAccessor(isolate(), parent, &no_gc_obviously)
1920  .HasSimpleTransitionTo(dead_target)) {
1921  ClearPotentialSimpleMapTransition(parent, dead_target);
1922  }
1923  }
1924 }
1925 
1926 void MarkCompactCollector::ClearPotentialSimpleMapTransition(Map map,
1927  Map dead_target) {
1928  DCHECK(!map->is_prototype_map());
1929  DCHECK(!dead_target->is_prototype_map());
1930  DCHECK_EQ(map->raw_transitions(), HeapObjectReference::Weak(dead_target));
1931  // Take ownership of the descriptor array.
1932  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
1933  DescriptorArray* descriptors = map->instance_descriptors();
1934  if (descriptors == dead_target->instance_descriptors() &&
1935  number_of_own_descriptors > 0) {
1936  TrimDescriptorArray(map, descriptors);
1937  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
1938  }
1939 }
1940 
1941 void MarkCompactCollector::ClearFullMapTransitions() {
1942  TransitionArray* array;
1943  while (weak_objects_.transition_arrays.Pop(kMainThread, &array)) {
1944  int num_transitions = array->number_of_entries();
1945  if (num_transitions > 0) {
1946  Map map;
1947  // The array might contain "undefined" elements because it's not yet
1948  // filled. Allow it.
1949  if (array->GetTargetIfExists(0, isolate(), &map)) {
1950  DCHECK(!map.is_null()); // Weak pointers aren't cleared yet.
1951  Map parent = Map::cast(map->constructor_or_backpointer());
1952  bool parent_is_alive =
1953  non_atomic_marking_state()->IsBlackOrGrey(parent);
1954  DescriptorArray* descriptors =
1955  parent_is_alive ? parent->instance_descriptors() : nullptr;
1956  bool descriptors_owner_died =
1957  CompactTransitionArray(parent, array, descriptors);
1958  if (descriptors_owner_died) {
1959  TrimDescriptorArray(parent, descriptors);
1960  }
1961  }
1962  }
1963  }
1964 }
1965 
1966 bool MarkCompactCollector::CompactTransitionArray(
1967  Map map, TransitionArray* transitions, DescriptorArray* descriptors) {
1968  DCHECK(!map->is_prototype_map());
1969  int num_transitions = transitions->number_of_entries();
1970  bool descriptors_owner_died = false;
1971  int transition_index = 0;
1972  // Compact all live transitions to the left.
1973  for (int i = 0; i < num_transitions; ++i) {
1974  Map target = transitions->GetTarget(i);
1975  DCHECK_EQ(target->constructor_or_backpointer(), map);
1976  if (non_atomic_marking_state()->IsWhite(target)) {
1977  if (descriptors != nullptr &&
1978  target->instance_descriptors() == descriptors) {
1979  DCHECK(!target->is_prototype_map());
1980  descriptors_owner_died = true;
1981  }
1982  } else {
1983  if (i != transition_index) {
1984  Name key = transitions->GetKey(i);
1985  transitions->SetKey(transition_index, key);
1986  HeapObjectSlot key_slot = transitions->GetKeySlot(transition_index);
1987  RecordSlot(transitions, key_slot, key);
1988  MaybeObject raw_target = transitions->GetRawTarget(i);
1989  transitions->SetRawTarget(transition_index, raw_target);
1990  HeapObjectSlot target_slot =
1991  transitions->GetTargetSlot(transition_index);
1992  RecordSlot(transitions, target_slot, raw_target->GetHeapObject());
1993  }
1994  transition_index++;
1995  }
1996  }
1997  // If there are no transitions to be cleared, return.
1998  if (transition_index == num_transitions) {
1999  DCHECK(!descriptors_owner_died);
2000  return false;
2001  }
2002  // Note that we never eliminate a transition array, though we might right-trim
2003  // such that number_of_transitions() == 0. If this assumption changes,
2004  // TransitionArray::Insert() will need to deal with the case that a transition
2005  // array disappeared during GC.
2006  int trim = transitions->Capacity() - transition_index;
2007  if (trim > 0) {
2008  heap_->RightTrimWeakFixedArray(transitions,
2009  trim * TransitionArray::kEntrySize);
2010  transitions->SetNumberOfTransitions(transition_index);
2011  }
2012  return descriptors_owner_died;
2013 }
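// Dead transitions (white targets) are dropped by sliding the surviving
// entries to the left and right-trimming the tail; the key and target slots
// of every surviving entry are re-recorded so the evacuation phase can update
// them. The underlying compaction pattern, sketched on a plain vector
// (illustrative only, not V8 API):
//
//   int live = 0;
//   for (int i = 0; i < n; ++i)
//     if (IsLive(entries[i])) entries[live++] = entries[i];
//   entries.resize(live);  // analogous to RightTrimWeakFixedArray above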
2014 
2015 void MarkCompactCollector::RightTrimDescriptorArray(DescriptorArray* array,
2016  int descriptors_to_trim) {
2017  int old_nof_all_descriptors = array->number_of_all_descriptors();
2018  int new_nof_all_descriptors = old_nof_all_descriptors - descriptors_to_trim;
2019  DCHECK_LT(0, descriptors_to_trim);
2020  DCHECK_LE(0, new_nof_all_descriptors);
2021  Address start = array->GetDescriptorSlot(new_nof_all_descriptors).address();
2022  Address end = array->GetDescriptorSlot(old_nof_all_descriptors).address();
2023  RememberedSet<OLD_TO_NEW>::RemoveRange(MemoryChunk::FromHeapObject(array),
2024  start, end,
2025  SlotSet::PREFREE_EMPTY_BUCKETS);
2026  RememberedSet<OLD_TO_OLD>::RemoveRange(MemoryChunk::FromHeapObject(array),
2027  start, end,
2028  SlotSet::PREFREE_EMPTY_BUCKETS);
2029  heap()->CreateFillerObjectAt(start, static_cast<int>(end - start),
2030  ClearRecordedSlots::kNo);
2031  array->set_number_of_all_descriptors(new_nof_all_descriptors);
2032 }
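// The trimmed tail is replaced with a filler object so linear heap iteration
// stays valid, and any remembered-set entries (old-to-new and old-to-old)
// covering that tail are removed so the pointer-updating phase never touches
// slots in the freed region.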
2033 
2034 void MarkCompactCollector::TrimDescriptorArray(Map map,
2035  DescriptorArray* descriptors) {
2036  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
2037  if (number_of_own_descriptors == 0) {
2038  DCHECK(descriptors == ReadOnlyRoots(heap_).empty_descriptor_array());
2039  return;
2040  }
2041  // TODO(ulan): Trim only if slack is greater than some percentage threshold.
2042  int to_trim =
2043  descriptors->number_of_all_descriptors() - number_of_own_descriptors;
2044  if (to_trim > 0) {
2045  descriptors->set_number_of_descriptors(number_of_own_descriptors);
2046  RightTrimDescriptorArray(descriptors, to_trim);
2047 
2048  TrimEnumCache(map, descriptors);
2049  descriptors->Sort();
2050 
2051  if (FLAG_unbox_double_fields) {
2052  LayoutDescriptor layout_descriptor = map->layout_descriptor();
2053  layout_descriptor = layout_descriptor->Trim(heap_, map, descriptors,
2054  number_of_own_descriptors);
2055  SLOW_DCHECK(layout_descriptor->IsConsistentWithMap(map, true));
2056  }
2057  }
2058  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
2059  map->set_owns_descriptors(true);
2060 }
2061 
2062 void MarkCompactCollector::TrimEnumCache(Map map,
2063  DescriptorArray* descriptors) {
2064  int live_enum = map->EnumLength();
2065  if (live_enum == kInvalidEnumCacheSentinel) {
2066  live_enum = map->NumberOfEnumerableProperties();
2067  }
2068  if (live_enum == 0) return descriptors->ClearEnumCache();
2069  EnumCache* enum_cache = descriptors->enum_cache();
2070 
2071  FixedArray keys = enum_cache->keys();
2072  int to_trim = keys->length() - live_enum;
2073  if (to_trim <= 0) return;
2074  heap_->RightTrimFixedArray(keys, to_trim);
2075 
2076  FixedArray indices = enum_cache->indices();
2077  to_trim = indices->length() - live_enum;
2078  if (to_trim <= 0) return;
2079  heap_->RightTrimFixedArray(indices, to_trim);
2080 }
2081 
2082 void MarkCompactCollector::ClearWeakCollections() {
2083  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_COLLECTIONS);
2084  EphemeronHashTable table;
2085 
2086  while (weak_objects_.ephemeron_hash_tables.Pop(kMainThread, &table)) {
2087  for (int i = 0; i < table->Capacity(); i++) {
2088  HeapObject* key = HeapObject::cast(table->KeyAt(i));
2089 #ifdef VERIFY_HEAP
2090  Object* value = table->ValueAt(i);
2091 
2092  if (value->IsHeapObject()) {
2093  CHECK_IMPLIES(
2094  non_atomic_marking_state()->IsBlackOrGrey(key),
2095  non_atomic_marking_state()->IsBlackOrGrey(HeapObject::cast(value)));
2096  }
2097 #endif
2098  if (!non_atomic_marking_state()->IsBlackOrGrey(key)) {
2099  table->RemoveEntry(i);
2100  }
2101  }
2102  }
2103 }
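// EphemeronHashTable backs JS WeakMap/WeakSet. An entry is kept only if its
// key is marked; entries with unmarked keys are removed here. Under
// VERIFY_HEAP the loop also checks the invariant established during marking:
// a live key implies a live value.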
2104 
2105 void MarkCompactCollector::ClearWeakReferences() {
2106  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_CLEAR_WEAK_REFERENCES);
2107  std::pair<HeapObject*, HeapObjectSlot> slot;
2108  HeapObjectReference cleared_weak_ref =
2109  HeapObjectReference::ClearedValue(isolate());
2110  while (weak_objects_.weak_references.Pop(kMainThread, &slot)) {
2111  HeapObject* value;
2112  HeapObjectSlot location = slot.second;
2113  if ((*location)->GetHeapObjectIfWeak(&value)) {
2114  DCHECK(!value->IsCell());
2115  if (non_atomic_marking_state()->IsBlackOrGrey(value)) {
2116  // The value of the weak reference is alive.
2117  RecordSlot(slot.first, location, value);
2118  } else {
2119  if (value->IsMap()) {
2120  // The map is non-live.
2121  ClearPotentialSimpleMapTransition(Map::cast(value));
2122  }
2123  location.store(cleared_weak_ref);
2124  }
2125  }
2126  }
2127 }
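// Each recorded weak slot is resolved exactly once: if the referent survived
// marking, the slot is re-recorded for the evacuation phase; otherwise it is
// overwritten with the cleared-weak-reference sentinel. A dead Map referent
// is additionally handed to ClearPotentialSimpleMapTransition so its parent
// map can reclaim ownership of a shared descriptor array.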
2128 
2129 void MarkCompactCollector::ClearJSWeakCells() {
2130  if (!FLAG_harmony_weak_refs) {
2131  return;
2132  }
2133  JSWeakCell* weak_cell;
2134  while (weak_objects_.js_weak_cells.Pop(kMainThread, &weak_cell)) {
2135  // We do not insert cleared weak cells into the list, so the value
2136  // cannot be a Smi here.
2137  HeapObject* target = HeapObject::cast(weak_cell->target());
2138  if (!non_atomic_marking_state()->IsBlackOrGrey(target)) {
2139  // The value of the JSWeakCell is dead.
2140  JSWeakFactory* weak_factory = JSWeakFactory::cast(weak_cell->factory());
2141  if (!weak_factory->scheduled_for_cleanup()) {
2142  heap()->AddDirtyJSWeakFactory(
2143  weak_factory,
2144  [](HeapObject* object, ObjectSlot slot, Object* target) {
2145  if (target->IsHeapObject()) {
2146  RecordSlot(object, slot, HeapObject::cast(target));
2147  }
2148  });
2149  }
2150  // We're modifying the pointers in JSWeakCell and JSWeakFactory during GC;
2151  // thus we need to record the slots it writes. The normal write barrier is
2152  // not enough, since it's disabled before GC.
2153  weak_cell->Nullify(
2154  isolate(), [](HeapObject* object, ObjectSlot slot, Object* target) {
2155  if (target->IsHeapObject()) {
2156  RecordSlot(object, slot, HeapObject::cast(target));
2157  }
2158  });
2159  DCHECK(weak_factory->NeedsCleanup());
2160  DCHECK(weak_factory->scheduled_for_cleanup());
2161  } else {
2162  // The value of the JSWeakCell is alive.
2163  ObjectSlot slot =
2164  HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
2165  RecordSlot(weak_cell, slot, HeapObject::cast(*slot));
2166  }
2167  }
2168 }
2169 
2170 void MarkCompactCollector::AbortWeakObjects() {
2171  weak_objects_.transition_arrays.Clear();
2172  weak_objects_.ephemeron_hash_tables.Clear();
2173  weak_objects_.current_ephemerons.Clear();
2174  weak_objects_.next_ephemerons.Clear();
2175  weak_objects_.discovered_ephemerons.Clear();
2176  weak_objects_.weak_references.Clear();
2177  weak_objects_.weak_objects_in_code.Clear();
2178  weak_objects_.js_weak_cells.Clear();
2179 }
2180 
2181 bool MarkCompactCollector::IsOnEvacuationCandidate(MaybeObject obj) {
2182  return Page::FromAddress(obj.ptr())->IsEvacuationCandidate();
2183 }
2184 
2185 void MarkCompactCollector::RecordRelocSlot(Code host, RelocInfo* rinfo,
2186  Object* target) {
2187  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
2188  Page* source_page = Page::FromAddress(host.ptr());
2189  if (target_page->IsEvacuationCandidate() &&
2190  (rinfo->host().is_null() ||
2191  !source_page->ShouldSkipEvacuationSlotRecording())) {
2192  RelocInfo::Mode rmode = rinfo->rmode();
2193  Address addr = rinfo->pc();
2194  SlotType slot_type = SlotTypeForRelocInfoMode(rmode);
2195  if (rinfo->IsInConstantPool()) {
2196  addr = rinfo->constant_pool_entry_address();
2197  if (RelocInfo::IsCodeTargetMode(rmode)) {
2198  slot_type = CODE_ENTRY_SLOT;
2199  } else {
2200  DCHECK(RelocInfo::IsEmbeddedObject(rmode));
2201  slot_type = OBJECT_SLOT;
2202  }
2203  }
2204  RememberedSet<OLD_TO_OLD>::InsertTyped(source_page, host.ptr(), slot_type,
2205  addr);
2206  }
2207 }
2208 
2209 template <AccessMode access_mode>
2210 static inline SlotCallbackResult UpdateSlot(
2211  MaybeObjectSlot slot, MaybeObject old, HeapObject* heap_obj,
2212  HeapObjectReferenceType reference_type) {
2213  MapWord map_word = heap_obj->map_word();
2214  if (map_word.IsForwardingAddress()) {
2215  DCHECK(Heap::InFromSpace(heap_obj) ||
2216  MarkCompactCollector::IsOnEvacuationCandidate(heap_obj) ||
2217  Page::FromAddress(heap_obj->address())
2218  ->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2219  MaybeObject target =
2220  reference_type == HeapObjectReferenceType::WEAK
2221  ? HeapObjectReference::Weak(map_word.ToForwardingAddress())
2222  : HeapObjectReference::Strong(map_word.ToForwardingAddress());
2223  if (access_mode == AccessMode::NON_ATOMIC) {
2224  slot.store(target);
2225  } else {
2226  slot.Release_CompareAndSwap(old, target);
2227  }
2228  DCHECK(!Heap::InFromSpace(target));
2229  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(target));
2230  } else {
2231  DCHECK(heap_obj->map()->IsMap());
2232  }
2233  // OLD_TO_OLD slots are always removed after updating.
2234  return REMOVE_SLOT;
2235 }
2236 
2237 template <AccessMode access_mode>
2238 static inline SlotCallbackResult UpdateSlot(MaybeObjectSlot slot) {
2239  MaybeObject obj = slot.Relaxed_Load();
2240  HeapObject* heap_obj;
2241  if (obj->GetHeapObjectIfWeak(&heap_obj)) {
2242  UpdateSlot<access_mode>(slot, obj, heap_obj, HeapObjectReferenceType::WEAK);
2243  } else if (obj->GetHeapObjectIfStrong(&heap_obj)) {
2244  return UpdateSlot<access_mode>(slot, obj, heap_obj,
2245  HeapObjectReferenceType::STRONG);
2246  }
2247  return REMOVE_SLOT;
2248 }
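// Both UpdateSlot overloads rely on the forwarding address that evacuation
// writes into the old copy's map word: if one is present, the slot is
// rewritten to point at the new location while preserving the weak or strong
// tag of the original reference. The atomic variant uses a compare-and-swap,
// so updaters racing on the same slot can only install the same forwarding
// target.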
2249 
2250 template <AccessMode access_mode>
2251 static inline SlotCallbackResult UpdateStrongSlot(MaybeObjectSlot maybe_slot) {
2252  DCHECK((*maybe_slot)->IsSmi() || (*maybe_slot)->IsStrong());
2253  ObjectSlot slot(maybe_slot);
2254  Object* obj = slot.Relaxed_Load();
2255  if (obj->IsHeapObject()) {
2256  HeapObject* heap_obj = HeapObject::cast(obj);
2257  return UpdateSlot<access_mode>(maybe_slot, MaybeObject::FromObject(obj),
2258  heap_obj, HeapObjectReferenceType::STRONG);
2259  }
2260  return REMOVE_SLOT;
2261 }
2262 
2263 // Visitor for updating root pointers and to-space pointers.
2264 // It does not expect to encounter pointers to dead objects.
2265 // TODO(ulan): Remove code object specific functions. This visitor
2266 // never visits code objects.
2267 class PointersUpdatingVisitor : public ObjectVisitor, public RootVisitor {
2268  public:
2269  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
2270 
2271  void VisitPointer(HeapObject* host, ObjectSlot p) override {
2272  UpdateStrongSlotInternal(p);
2273  }
2274 
2275  void VisitPointer(HeapObject* host, MaybeObjectSlot p) override {
2276  UpdateSlotInternal(p);
2277  }
2278 
2279  void VisitPointers(HeapObject* host, ObjectSlot start,
2280  ObjectSlot end) override {
2281  for (ObjectSlot p = start; p < end; ++p) {
2282  UpdateStrongSlotInternal(p);
2283  }
2284  }
2285 
2286  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
2287  MaybeObjectSlot end) final {
2288  for (MaybeObjectSlot p = start; p < end; ++p) {
2289  UpdateSlotInternal(p);
2290  }
2291  }
2292 
2293  void VisitRootPointer(Root root, const char* description,
2294  ObjectSlot p) override {
2295  UpdateStrongSlotInternal(p);
2296  }
2297 
2298  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
2299  ObjectSlot end) override {
2300  for (ObjectSlot p = start; p < end; ++p) UpdateStrongSlotInternal(p);
2301  }
2302 
2303  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
2304  UpdateTypedSlotHelper::UpdateEmbeddedPointer(
2305  heap_, rinfo, UpdateStrongMaybeObjectSlotInternal);
2306  }
2307 
2308  void VisitCodeTarget(Code host, RelocInfo* rinfo) override {
2309  UpdateTypedSlotHelper::UpdateCodeTarget(
2310  rinfo, UpdateStrongMaybeObjectSlotInternal);
2311  }
2312 
2313  private:
2314  static inline SlotCallbackResult UpdateStrongMaybeObjectSlotInternal(
2315  MaybeObjectSlot slot) {
2316  DCHECK(!(*slot)->IsWeakOrCleared());
2317  return UpdateStrongSlot<AccessMode::NON_ATOMIC>(slot);
2318  }
2319 
2320  static inline SlotCallbackResult UpdateStrongSlotInternal(ObjectSlot slot) {
2321  DCHECK(!HasWeakHeapObjectTag(*slot));
2322  return UpdateStrongSlot<AccessMode::NON_ATOMIC>(MaybeObjectSlot(slot));
2323  }
2324 
2325  static inline SlotCallbackResult UpdateSlotInternal(MaybeObjectSlot slot) {
2326  return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
2327  }
2328 
2329  Heap* heap_;
2330 };
2331 
2332 static String UpdateReferenceInExternalStringTableEntry(Heap* heap,
2333  ObjectSlot p) {
2334  MapWord map_word = HeapObject::cast(*p)->map_word();
2335 
2336  if (map_word.IsForwardingAddress()) {
2337  String new_string = String::cast(map_word.ToForwardingAddress());
2338 
2339  if (new_string->IsExternalString()) {
2340  MemoryChunk::MoveExternalBackingStoreBytes(
2341  ExternalBackingStoreType::kExternalString,
2342  Page::FromAddress(reinterpret_cast<Address>(*p)),
2343  Page::FromHeapObject(new_string),
2344  ExternalString::cast(new_string)->ExternalPayloadSize());
2345  }
2346  return new_string;
2347  }
2348 
2349  return String::cast(*p);
2350 }
2351 
2352 void MarkCompactCollector::EvacuatePrologue() {
2353  // New space.
2354  NewSpace* new_space = heap()->new_space();
2355  // Append the list of new space pages to be processed.
2356  for (Page* p :
2357  PageRange(new_space->first_allocatable_address(), new_space->top())) {
2358  new_space_evacuation_pages_.push_back(p);
2359  }
2360  new_space->Flip();
2361  new_space->ResetLinearAllocationArea();
2362 
2363  heap()->new_lo_space()->Flip();
2364 
2365  // Old space.
2366  DCHECK(old_space_evacuation_pages_.empty());
2367  old_space_evacuation_pages_ = std::move(evacuation_candidates_);
2368  evacuation_candidates_.clear();
2369  DCHECK(evacuation_candidates_.empty());
2370 }
2371 
2372 void MarkCompactCollector::EvacuateEpilogue() {
2373  aborted_evacuation_candidates_.clear();
2374  // New space.
2375  heap()->new_space()->set_age_mark(heap()->new_space()->top());
2376  // Deallocate unmarked large objects.
2377  heap()->lo_space()->FreeUnmarkedObjects();
2378  heap()->code_lo_space()->FreeUnmarkedObjects();
2379  heap()->new_lo_space()->FreeUnmarkedObjects();
2380  // Old space. Deallocate evacuated candidate pages.
2381  ReleaseEvacuationCandidates();
2382  // Give pages that are queued to be freed back to the OS.
2383  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2384 #ifdef DEBUG
2385  // Old-to-old slot sets must be empty after evacuation.
2386  for (Page* p : *heap()->old_space()) {
2387  DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2388  DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
2389  DCHECK_NULL(p->invalidated_slots());
2390  }
2391 #endif
2392 }
2393 
2394 class Evacuator : public Malloced {
2395  public:
2396  enum EvacuationMode {
2397  kObjectsNewToOld,
2398  kPageNewToOld,
2399  kObjectsOldToOld,
2400  kPageNewToNew,
2401  };
2402 
2403  static inline EvacuationMode ComputeEvacuationMode(MemoryChunk* chunk) {
2404  // Note: The order of checks is important in this function.
2405  if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_OLD_PROMOTION))
2406  return kPageNewToOld;
2407  if (chunk->IsFlagSet(MemoryChunk::PAGE_NEW_NEW_PROMOTION))
2408  return kPageNewToNew;
2409  if (chunk->InNewSpace()) return kObjectsNewToOld;
2410  return kObjectsOldToOld;
2411  }
2412 
2413  // NewSpacePages with more live bytes than this threshold qualify for fast
2414  // evacuation.
2415  static intptr_t NewSpacePageEvacuationThreshold() {
2416  if (FLAG_page_promotion)
2417  return FLAG_page_promotion_threshold *
2418  MemoryChunkLayout::AllocatableMemoryInDataPage() / 100;
2419  return MemoryChunkLayout::AllocatableMemoryInDataPage() + kPointerSize;
2420  }
2421 
2422  Evacuator(Heap* heap, RecordMigratedSlotVisitor* record_visitor)
2423  : heap_(heap),
2424  local_allocator_(heap_),
2425  local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
2426  new_space_visitor_(heap_, &local_allocator_, record_visitor,
2427  &local_pretenuring_feedback_),
2428  new_to_new_page_visitor_(heap_, record_visitor,
2429  &local_pretenuring_feedback_),
2430  new_to_old_page_visitor_(heap_, record_visitor,
2431  &local_pretenuring_feedback_),
2432 
2433  old_space_visitor_(heap_, &local_allocator_, record_visitor),
2434  duration_(0.0),
2435  bytes_compacted_(0) {}
2436 
2437  virtual ~Evacuator() = default;
2438 
2439  void EvacuatePage(MemoryChunk* chunk);
2440 
2441  void AddObserver(MigrationObserver* observer) {
2442  new_space_visitor_.AddObserver(observer);
2443  old_space_visitor_.AddObserver(observer);
2444  }
2445 
2446  // Merge back locally cached info sequentially. Note that this method needs
2447  // to be called from the main thread.
2448  inline void Finalize();
2449 
2450  virtual GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() = 0;
2451 
2452  protected:
2453  static const int kInitialLocalPretenuringFeedbackCapacity = 256;
2454 
2455  // |saved_live_bytes| returns the live bytes of the page that was processed.
2456  virtual void RawEvacuatePage(MemoryChunk* chunk,
2457  intptr_t* saved_live_bytes) = 0;
2458 
2459  inline Heap* heap() { return heap_; }
2460 
2461  void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
2462  duration_ += duration;
2463  bytes_compacted_ += bytes_compacted;
2464  }
2465 
2466  Heap* heap_;
2467 
2468  // Locally cached collector data.
2469  LocalAllocator local_allocator_;
2470  Heap::PretenuringFeedbackMap local_pretenuring_feedback_;
2471 
2472  // Visitors for the corresponding spaces.
2473  EvacuateNewSpaceVisitor new_space_visitor_;
2474  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_NEW>
2475      new_to_new_page_visitor_;
2476  EvacuateNewSpacePageVisitor<PageEvacuationMode::NEW_TO_OLD>
2477      new_to_old_page_visitor_;
2478  EvacuateOldSpaceVisitor old_space_visitor_;
2479 
2480  // Book keeping info.
2481  double duration_;
2482  intptr_t bytes_compacted_;
2483 };
2484 
2485 void Evacuator::EvacuatePage(MemoryChunk* chunk) {
2486  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"), "Evacuator::EvacuatePage");
2487  DCHECK(chunk->SweepingDone());
2488  intptr_t saved_live_bytes = 0;
2489  double evacuation_time = 0.0;
2490  {
2491  AlwaysAllocateScope always_allocate(heap()->isolate());
2492  TimedScope timed_scope(&evacuation_time);
2493  RawEvacuatePage(chunk, &saved_live_bytes);
2494  }
2495  ReportCompactionProgress(evacuation_time, saved_live_bytes);
2496  if (FLAG_trace_evacuation) {
2497  PrintIsolate(heap()->isolate(),
2498  "evacuation[%p]: page=%p new_space=%d "
2499  "page_evacuation=%d executable=%d contains_age_mark=%d "
2500  "live_bytes=%" V8PRIdPTR " time=%f success=%d\n",
2501  static_cast<void*>(this), static_cast<void*>(chunk),
2502  chunk->InNewSpace(),
2503  chunk->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION) ||
2504  chunk->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION),
2505  chunk->IsFlagSet(MemoryChunk::IS_EXECUTABLE),
2506  chunk->Contains(heap()->new_space()->age_mark()),
2507  saved_live_bytes, evacuation_time,
2508  chunk->IsFlagSet(Page::COMPACTION_WAS_ABORTED));
2509  }
2510 }
2511 
2512 void Evacuator::Finalize() {
2513  local_allocator_.Finalize();
2514  heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
2515  heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size() +
2516  new_to_old_page_visitor_.moved_bytes());
2517  heap()->IncrementSemiSpaceCopiedObjectSize(
2518  new_space_visitor_.semispace_copied_size() +
2519  new_to_new_page_visitor_.moved_bytes());
2520  heap()->IncrementYoungSurvivorsCounter(
2521  new_space_visitor_.promoted_size() +
2522  new_space_visitor_.semispace_copied_size() +
2523  new_to_old_page_visitor_.moved_bytes() +
2524  new_to_new_page_visitor_.moved_bytes());
2525  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
2526 }
2527 
2528 class FullEvacuator : public Evacuator {
2529  public:
2530  FullEvacuator(MarkCompactCollector* collector,
2531                RecordMigratedSlotVisitor* record_visitor)
2532  : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
2533 
2534  GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
2535  return GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_COPY;
2536  }
2537 
2538  protected:
2539  void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
2540 
2541  MarkCompactCollector* collector_;
2542 };
2543 
2544 void FullEvacuator::RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) {
2545  const EvacuationMode evacuation_mode = ComputeEvacuationMode(chunk);
2546  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2547  "FullEvacuator::RawEvacuatePage", "evacuation_mode",
2548  evacuation_mode);
2549  MarkCompactCollector::NonAtomicMarkingState* marking_state =
2550      collector_->non_atomic_marking_state();
2551  *live_bytes = marking_state->live_bytes(chunk);
2552  HeapObject* failed_object = nullptr;
2553  switch (evacuation_mode) {
2554  case kObjectsNewToOld:
2555  LiveObjectVisitor::VisitBlackObjectsNoFail(
2556  chunk, marking_state, &new_space_visitor_,
2557  LiveObjectVisitor::kClearMarkbits);
2558  // ArrayBufferTracker will be updated during pointers updating.
2559  break;
2560  case kPageNewToOld:
2561  LiveObjectVisitor::VisitBlackObjectsNoFail(
2562  chunk, marking_state, &new_to_old_page_visitor_,
2563  LiveObjectVisitor::kKeepMarking);
2564  new_to_old_page_visitor_.account_moved_bytes(
2565  marking_state->live_bytes(chunk));
2566  // ArrayBufferTracker will be updated during sweeping.
2567  break;
2568  case kPageNewToNew:
2569  LiveObjectVisitor::VisitBlackObjectsNoFail(
2570  chunk, marking_state, &new_to_new_page_visitor_,
2571  LiveObjectVisitor::kKeepMarking);
2572  new_to_new_page_visitor_.account_moved_bytes(
2573  marking_state->live_bytes(chunk));
2574  // ArrayBufferTracker will be updated during sweeping.
2575  break;
2576  case kObjectsOldToOld: {
2577  const bool success = LiveObjectVisitor::VisitBlackObjects(
2578  chunk, marking_state, &old_space_visitor_,
2579  LiveObjectVisitor::kClearMarkbits, &failed_object);
2580  if (!success) {
2581  // Aborted compaction page. Actual processing happens on the main
2582  // thread for simplicity reasons.
2583  collector_->ReportAbortedEvacuationCandidate(failed_object, chunk);
2584  } else {
2585  // ArrayBufferTracker will be updated during pointers updating.
2586  }
2587  break;
2588  }
2589  }
2590 }
2591 
2592 class EvacuationItem : public ItemParallelJob::Item {
2593  public:
2594  explicit EvacuationItem(MemoryChunk* chunk) : chunk_(chunk) {}
2595  ~EvacuationItem() override = default;
2596  MemoryChunk* chunk() const { return chunk_; }
2597 
2598  private:
2599  MemoryChunk* chunk_;
2600 };
2601 
2602 class PageEvacuationTask : public ItemParallelJob::Task {
2603  public:
2604  PageEvacuationTask(Isolate* isolate, Evacuator* evacuator)
2605  : ItemParallelJob::Task(isolate),
2606  evacuator_(evacuator),
2607  tracer_(isolate->heap()->tracer()) {}
2608 
2609  void RunInParallel() override {
2610  TRACE_BACKGROUND_GC(tracer_, evacuator_->GetBackgroundTracingScope());
2611  EvacuationItem* item = nullptr;
2612  while ((item = GetItem<EvacuationItem>()) != nullptr) {
2613  evacuator_->EvacuatePage(item->chunk());
2614  item->MarkFinished();
2615  }
2616  };
2617 
2618  private:
2619  Evacuator* evacuator_;
2620  GCTracer* tracer_;
2621 };
2622 
2623 template <class Evacuator, class Collector>
2624 void MarkCompactCollectorBase::CreateAndExecuteEvacuationTasks(
2625  Collector* collector, ItemParallelJob* job,
2626  RecordMigratedSlotVisitor* record_visitor,
2627  MigrationObserver* migration_observer, const intptr_t live_bytes) {
2628  // Used for trace summary.
2629  double compaction_speed = 0;
2630  if (FLAG_trace_evacuation) {
2631  compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
2632  }
2633 
2634  const bool profiling = isolate()->LogObjectRelocation();
2635  ProfilingMigrationObserver profiling_observer(heap());
2636 
2637  const int wanted_num_tasks =
2638  NumberOfParallelCompactionTasks(job->NumberOfItems());
2639  Evacuator** evacuators = new Evacuator*[wanted_num_tasks];
2640  for (int i = 0; i < wanted_num_tasks; i++) {
2641  evacuators[i] = new Evacuator(collector, record_visitor);
2642  if (profiling) evacuators[i]->AddObserver(&profiling_observer);
2643  if (migration_observer != nullptr)
2644  evacuators[i]->AddObserver(migration_observer);
2645  job->AddTask(new PageEvacuationTask(heap()->isolate(), evacuators[i]));
2646  }
2647  job->Run(isolate()->async_counters());
2648  for (int i = 0; i < wanted_num_tasks; i++) {
2649  evacuators[i]->Finalize();
2650  delete evacuators[i];
2651  }
2652  delete[] evacuators;
2653 
2654  if (FLAG_trace_evacuation) {
2655  PrintIsolate(isolate(),
2656  "%8.0f ms: evacuation-summary: parallel=%s pages=%d "
2657  "wanted_tasks=%d tasks=%d cores=%d live_bytes=%" V8PRIdPTR
2658  " compaction_speed=%.f\n",
2659  isolate()->time_millis_since_init(),
2660  FLAG_parallel_compaction ? "yes" : "no", job->NumberOfItems(),
2661  wanted_num_tasks, job->NumberOfTasks(),
2662  V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1,
2663  live_bytes, compaction_speed);
2664  }
2665 }
2666 
2667 bool MarkCompactCollectorBase::ShouldMovePage(Page* p, intptr_t live_bytes) {
2668  const bool reduce_memory = heap()->ShouldReduceMemory();
2669  const Address age_mark = heap()->new_space()->age_mark();
2670  return !reduce_memory && !p->NeverEvacuate() &&
2671  (live_bytes > Evacuator::NewSpacePageEvacuationThreshold()) &&
2672  !p->Contains(age_mark) && heap()->CanExpandOldGeneration(live_bytes);
2673 }
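// A new-space page is moved wholesale (instead of evacuating its objects one
// by one) only if the heap is not trying to shrink, the page is movable, its
// live bytes exceed NewSpacePageEvacuationThreshold(), i.e.
// FLAG_page_promotion_threshold percent of a data page's allocatable area,
// it does not contain the age mark, and the old generation can absorb the
// extra bytes.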
2674 
2675 void MarkCompactCollector::EvacuatePagesInParallel() {
2676  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
2677  &page_parallel_job_semaphore_);
2678  intptr_t live_bytes = 0;
2679 
2680  for (Page* page : old_space_evacuation_pages_) {
2681  live_bytes += non_atomic_marking_state()->live_bytes(page);
2682  evacuation_job.AddItem(new EvacuationItem(page));
2683  }
2684 
2685  for (Page* page : new_space_evacuation_pages_) {
2686  intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
2687  if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
2688  live_bytes += live_bytes_on_page;
2689  if (ShouldMovePage(page, live_bytes_on_page)) {
2690  if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
2691  EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
2692  DCHECK_EQ(heap()->old_space(), page->owner());
2693  // The move added page->allocated_bytes to the old space, but we are
2694  // going to sweep the page and add page->live_byte_count.
2695  heap()->old_space()->DecreaseAllocatedBytes(page->allocated_bytes(),
2696  page);
2697  } else {
2698  EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
2699  }
2700  }
2701  evacuation_job.AddItem(new EvacuationItem(page));
2702  }
2703 
2704  // Promote young generation large objects.
2705  LargePage* current = heap()->new_lo_space()->first_page();
2706  IncrementalMarking::NonAtomicMarkingState* marking_state =
2707  heap()->incremental_marking()->non_atomic_marking_state();
2708  while (current) {
2709  LargePage* next_current = current->next_page();
2710  HeapObject* object = current->GetObject();
2711  DCHECK(!marking_state->IsGrey(object));
2712  if (marking_state->IsBlack(object)) {
2713  heap_->lo_space()->PromoteNewLargeObject(current);
2714  current->SetFlag(Page::PAGE_NEW_OLD_PROMOTION);
2715  evacuation_job.AddItem(new EvacuationItem(current));
2716  }
2717  current = next_current;
2718  }
2719 
2720  if (evacuation_job.NumberOfItems() == 0) return;
2721 
2722  RecordMigratedSlotVisitor record_visitor(this);
2723  CreateAndExecuteEvacuationTasks<FullEvacuator>(
2724  this, &evacuation_job, &record_visitor, nullptr, live_bytes);
2725  PostProcessEvacuationCandidates();
2726 }
2727 
2728 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
2729  public:
2730  Object* RetainAs(Object* object) override {
2731  if (object->IsHeapObject()) {
2732  HeapObject* heap_object = HeapObject::cast(object);
2733  MapWord map_word = heap_object->map_word();
2734  if (map_word.IsForwardingAddress()) {
2735  return map_word.ToForwardingAddress();
2736  }
2737  }
2738  return object;
2739  }
2740 };
2741 
2742 void MarkCompactCollector::RecordLiveSlotsOnPage(Page* page) {
2743  EvacuateRecordOnlyVisitor visitor(heap());
2744  LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
2745  &visitor,
2746  LiveObjectVisitor::kKeepMarking);
2747 }
2748 
2749 template <class Visitor, typename MarkingState>
2750 bool LiveObjectVisitor::VisitBlackObjects(MemoryChunk* chunk,
2751  MarkingState* marking_state,
2752  Visitor* visitor,
2753  IterationMode iteration_mode,
2754  HeapObject** failed_object) {
2755  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2756  "LiveObjectVisitor::VisitBlackObjects");
2757  for (auto object_and_size :
2758  LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2759  HeapObject* const object = object_and_size.first;
2760  if (!visitor->Visit(object, object_and_size.second)) {
2761  if (iteration_mode == kClearMarkbits) {
2762  marking_state->bitmap(chunk)->ClearRange(
2763  chunk->AddressToMarkbitIndex(chunk->area_start()),
2764  chunk->AddressToMarkbitIndex(object->address()));
2765  *failed_object = object;
2766  }
2767  return false;
2768  }
2769  }
2770  if (iteration_mode == kClearMarkbits) {
2771  marking_state->ClearLiveness(chunk);
2772  }
2773  return true;
2774 }
2775 
2776 template <class Visitor, typename MarkingState>
2777 void LiveObjectVisitor::VisitBlackObjectsNoFail(MemoryChunk* chunk,
2778  MarkingState* marking_state,
2779  Visitor* visitor,
2780  IterationMode iteration_mode) {
2781  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2782  "LiveObjectVisitor::VisitBlackObjectsNoFail");
2783  for (auto object_and_size :
2784  LiveObjectRange<kBlackObjects>(chunk, marking_state->bitmap(chunk))) {
2785  HeapObject* const object = object_and_size.first;
2786  DCHECK(marking_state->IsBlack(object));
2787  const bool success = visitor->Visit(object, object_and_size.second);
2788  USE(success);
2789  DCHECK(success);
2790  }
2791  if (iteration_mode == kClearMarkbits) {
2792  marking_state->ClearLiveness(chunk);
2793  }
2794 }
2795 
2796 template <class Visitor, typename MarkingState>
2797 void LiveObjectVisitor::VisitGreyObjectsNoFail(MemoryChunk* chunk,
2798  MarkingState* marking_state,
2799  Visitor* visitor,
2800  IterationMode iteration_mode) {
2801  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2802  "LiveObjectVisitor::VisitGreyObjectsNoFail");
2803  for (auto object_and_size :
2804  LiveObjectRange<kGreyObjects>(chunk, marking_state->bitmap(chunk))) {
2805  HeapObject* const object = object_and_size.first;
2806  DCHECK(marking_state->IsGrey(object));
2807  const bool success = visitor->Visit(object, object_and_size.second);
2808  USE(success);
2809  DCHECK(success);
2810  }
2811  if (iteration_mode == kClearMarkbits) {
2812  marking_state->ClearLiveness(chunk);
2813  }
2814 }
2815 
2816 template <typename MarkingState>
2817 void LiveObjectVisitor::RecomputeLiveBytes(MemoryChunk* chunk,
2818  MarkingState* marking_state) {
2819  int new_live_size = 0;
2820  for (auto object_and_size :
2821  LiveObjectRange<kAllLiveObjects>(chunk, marking_state->bitmap(chunk))) {
2822  new_live_size += object_and_size.second;
2823  }
2824  marking_state->SetLiveBytes(chunk, new_live_size);
2825 }
2826 
2827 void MarkCompactCollector::Evacuate() {
2828  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
2829  base::MutexGuard guard(heap()->relocation_mutex());
2830 
2831  {
2832  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_PROLOGUE);
2833  EvacuatePrologue();
2834  }
2835 
2836  {
2837  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_COPY);
2838  EvacuationScope evacuation_scope(this);
2839  EvacuatePagesInParallel();
2840  }
2841 
2842  UpdatePointersAfterEvacuation();
2843 
2844  {
2845  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_REBALANCE);
2846  if (!heap()->new_space()->Rebalance()) {
2847  heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
2848  }
2849  }
2850 
2851  // Give pages that are queued to be freed back to the OS. Note that filtering
2852  // slots only handles old space (for unboxed doubles), and thus map space can
2853  // still contain stale pointers. We only free the chunks after pointer updates
2854  // to still have access to page headers.
2855  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
2856 
2857  {
2858  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
2859 
2860  for (Page* p : new_space_evacuation_pages_) {
2861  if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
2862  p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
2863  sweeper()->AddPageForIterability(p);
2864  } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
2865  p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
2866  DCHECK_EQ(OLD_SPACE, p->owner()->identity());
2867  sweeper()->AddPage(OLD_SPACE, p, Sweeper::REGULAR);
2868  }
2869  }
2870  new_space_evacuation_pages_.clear();
2871 
2872  for (Page* p : old_space_evacuation_pages_) {
2873  // Important: skip list should be cleared only after roots were updated
2874  // because root iteration traverses the stack and might have to find
2875  // code objects from non-updated pc pointing into evacuation candidate.
2876  SkipList* list = p->skip_list();
2877  if (list != nullptr) list->Clear();
2878  if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
2879  sweeper()->AddPage(p->owner()->identity(), p, Sweeper::REGULAR);
2880  p->ClearFlag(Page::COMPACTION_WAS_ABORTED);
2881  }
2882  }
2883  }
2884 
2885  {
2886  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_EPILOGUE);
2887  EvacuateEpilogue();
2888  }
2889 
2890 #ifdef VERIFY_HEAP
2891  if (FLAG_verify_heap && !sweeper()->sweeping_in_progress()) {
2892  FullEvacuationVerifier verifier(heap());
2893  verifier.Run();
2894  }
2895 #endif
2896 }
2897 
2898 class UpdatingItem : public ItemParallelJob::Item {
2899  public:
2900  ~UpdatingItem() override = default;
2901  virtual void Process() = 0;
2902 };
2903 
2904 class PointersUpdatingTask : public ItemParallelJob::Task {
2905  public:
2906  explicit PointersUpdatingTask(Isolate* isolate,
2907  GCTracer::BackgroundScope::ScopeId scope)
2908  : ItemParallelJob::Task(isolate),
2909  tracer_(isolate->heap()->tracer()),
2910  scope_(scope) {}
2911 
2912  void RunInParallel() override {
2913  TRACE_BACKGROUND_GC(tracer_, scope_);
2914  UpdatingItem* item = nullptr;
2915  while ((item = GetItem<UpdatingItem>()) != nullptr) {
2916  item->Process();
2917  item->MarkFinished();
2918  }
2919  };
2920 
2921  private:
2922  GCTracer* tracer_;
2923  GCTracer::BackgroundScope::ScopeId scope_;
2924 };
2925 
2926 template <typename MarkingState>
2927 class ToSpaceUpdatingItem : public UpdatingItem {
2928  public:
2929  explicit ToSpaceUpdatingItem(MemoryChunk* chunk, Address start, Address end,
2930  MarkingState* marking_state)
2931  : chunk_(chunk),
2932  start_(start),
2933  end_(end),
2934  marking_state_(marking_state) {}
2935  ~ToSpaceUpdatingItem() override = default;
2936 
2937  void Process() override {
2938  if (chunk_->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
2939  // New->new promoted pages contain garbage so they require iteration using
2940  // markbits.
2941  ProcessVisitLive();
2942  } else {
2943  ProcessVisitAll();
2944  }
2945  }
2946 
2947  private:
2948  void ProcessVisitAll() {
2949  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2950  "ToSpaceUpdatingItem::ProcessVisitAll");
2951  PointersUpdatingVisitor visitor(chunk_->heap());
2952  for (Address cur = start_; cur < end_;) {
2953  HeapObject* object = HeapObject::FromAddress(cur);
2954  Map map = object->map();
2955  int size = object->SizeFromMap(map);
2956  object->IterateBodyFast(map, size, &visitor);
2957  cur += size;
2958  }
2959  }
2960 
2961  void ProcessVisitLive() {
2962  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2963  "ToSpaceUpdatingItem::ProcessVisitLive");
2964  // For young generation evacuations we want to visit grey objects, for
2965  // full MC, we need to visit black objects.
2966  PointersUpdatingVisitor visitor(chunk_->heap());
2967  for (auto object_and_size : LiveObjectRange<kAllLiveObjects>(
2968  chunk_, marking_state_->bitmap(chunk_))) {
2969  object_and_size.first->IterateBodyFast(&visitor);
2970  }
2971  }
2972 
2973  MemoryChunk* chunk_;
2974  Address start_;
2975  Address end_;
2976  MarkingState* marking_state_;
2977 };
2978 
2979 template <typename MarkingState>
2980 class RememberedSetUpdatingItem : public UpdatingItem {
2981  public:
2982  explicit RememberedSetUpdatingItem(Heap* heap, MarkingState* marking_state,
2983  MemoryChunk* chunk,
2984  RememberedSetUpdatingMode updating_mode)
2985  : heap_(heap),
2986  marking_state_(marking_state),
2987  chunk_(chunk),
2988  updating_mode_(updating_mode) {}
2989  ~RememberedSetUpdatingItem() override = default;
2990 
2991  void Process() override {
2992  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
2993  "RememberedSetUpdatingItem::Process");
2994  base::MutexGuard guard(chunk_->mutex());
2995  CodePageMemoryModificationScope memory_modification_scope(chunk_);
2996  UpdateUntypedPointers();
2997  UpdateTypedPointers();
2998  }
2999 
3000  private:
3001  inline SlotCallbackResult CheckAndUpdateOldToNewSlot(MaybeObjectSlot slot) {
3002  HeapObject* heap_object;
3003  if (!(*slot)->GetHeapObject(&heap_object)) {
3004  return REMOVE_SLOT;
3005  }
3006  if (Heap::InFromSpace(heap_object)) {
3007  MapWord map_word = heap_object->map_word();
3008  if (map_word.IsForwardingAddress()) {
3009  HeapObjectReference::Update(HeapObjectSlot(slot),
3010  map_word.ToForwardingAddress());
3011  }
3012  bool success = (*slot)->GetHeapObject(&heap_object);
3013  USE(success);
3014  DCHECK(success);
3015  // If the object was in from space before and is in to space after
3016  // executing the callback, the object is still live.
3017  // Unfortunately, we do not know about the slot. It could be in a
3018  // just freed free space object.
3019  if (Heap::InToSpace(heap_object)) {
3020  return KEEP_SLOT;
3021  }
3022  } else if (Heap::InToSpace(heap_object)) {
3023  // Slots can point to "to" space if the page has been moved, or if the
3024  // slot has been recorded multiple times in the remembered set, or
3025  // if the slot was already updated during old->old updating.
3026  // In case the page has been moved, check markbits to determine liveness
3027  // of the slot. In the other case, the slot can just be kept.
3028  if (Page::FromAddress(heap_object->address())
3029  ->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) {
3030  // IsBlackOrGrey is required because objects are marked as grey for
3031  // the young generation collector while they are black for the full
3032  // MC.
3033  if (marking_state_->IsBlackOrGrey(heap_object)) {
3034  return KEEP_SLOT;
3035  } else {
3036  return REMOVE_SLOT;
3037  }
3038  }
3039  return KEEP_SLOT;
3040  } else {
3041  DCHECK(!Heap::InNewSpace(heap_object));
3042  }
3043  return REMOVE_SLOT;
3044  }
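// The return value drives remembered-set maintenance: KEEP_SLOT means the
// (possibly updated) slot still points into new space and must stay in the
// OLD_TO_NEW set; REMOVE_SLOT means the target died or now lives in the old
// generation, so the entry can be dropped.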
3045 
3046  void UpdateUntypedPointers() {
3047  if (chunk_->slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() != nullptr) {
3048  RememberedSet<OLD_TO_NEW>::Iterate(
3049  chunk_,
3050  [this](MaybeObjectSlot slot) {
3051  return CheckAndUpdateOldToNewSlot(slot);
3052  },
3053  SlotSet::PREFREE_EMPTY_BUCKETS);
3054  }
3055  if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3056  (chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
3057  InvalidatedSlotsFilter filter(chunk_);
3058  RememberedSet<OLD_TO_OLD>::Iterate(
3059  chunk_,
3060  [&filter](MaybeObjectSlot slot) {
3061  if (!filter.IsValid(slot.address())) return REMOVE_SLOT;
3062  return UpdateSlot<AccessMode::NON_ATOMIC>(slot);
3063  },
3064  SlotSet::PREFREE_EMPTY_BUCKETS);
3065  }
3066  if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3067  chunk_->invalidated_slots() != nullptr) {
3068 #ifdef DEBUG
3069  for (auto object_size : *chunk_->invalidated_slots()) {
3070  HeapObject* object = object_size.first;
3071  int size = object_size.second;
3072  DCHECK_LE(object->SizeFromMap(object->map()), size);
3073  }
3074 #endif
3075  // The invalidated slots are not needed after old-to-old slots were
3076  // processed.
3077  chunk_->ReleaseInvalidatedSlots();
3078  }
3079  }
3080 
3081  void UpdateTypedPointers() {
3082  if (chunk_->typed_slot_set<OLD_TO_NEW, AccessMode::NON_ATOMIC>() !=
3083  nullptr) {
3084  CHECK_NE(chunk_->owner(), heap_->map_space());
3085  const auto check_and_update_old_to_new_slot_fn =
3086  [this](MaybeObjectSlot slot) {
3087  return CheckAndUpdateOldToNewSlot(slot);
3088  };
3089  RememberedSet<OLD_TO_NEW>::IterateTyped(
3090  chunk_, [=](SlotType slot_type, Address host_addr, Address slot) {
3091  return UpdateTypedSlotHelper::UpdateTypedSlot(
3092  heap_, slot_type, slot, check_and_update_old_to_new_slot_fn);
3093  });
3094  }
3095  if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
3096  (chunk_->typed_slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() !=
3097  nullptr)) {
3098  CHECK_NE(chunk_->owner(), heap_->map_space());
3099  RememberedSet<OLD_TO_OLD>::IterateTyped(
3100  chunk_, [this](SlotType slot_type, Address host_addr, Address slot) {
3101  // Using UpdateStrongSlot is OK here, because there are no weak
3102  // typed slots.
3103  return UpdateTypedSlotHelper::UpdateTypedSlot(
3104  heap_, slot_type, slot,
3105  UpdateStrongSlot<AccessMode::NON_ATOMIC>);
3106  });
3107  }
3108  }
3109 
3110  Heap* heap_;
3111  MarkingState* marking_state_;
3112  MemoryChunk* chunk_;
3113  RememberedSetUpdatingMode updating_mode_;
3114 };
3115 
3116 UpdatingItem* MarkCompactCollector::CreateToSpaceUpdatingItem(
3117  MemoryChunk* chunk, Address start, Address end) {
3118  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
3119  chunk, start, end, non_atomic_marking_state());
3120 }
3121 
3122 UpdatingItem* MarkCompactCollector::CreateRememberedSetUpdatingItem(
3123  MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
3124  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
3125  heap(), non_atomic_marking_state(), chunk, updating_mode);
3126 }
3127 
3128 class GlobalHandlesUpdatingItem : public UpdatingItem {
3129  public:
3130  GlobalHandlesUpdatingItem(Heap* heap, GlobalHandles* global_handles,
3131  size_t start, size_t end)
3132  : heap_(heap),
3133  global_handles_(global_handles),
3134  start_(start),
3135  end_(end) {}
3136  ~GlobalHandlesUpdatingItem() override = default;
3137 
3138  void Process() override {
3139  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3140  "GlobalHandlesUpdatingItem::Process");
3141  PointersUpdatingVisitor updating_visitor(heap_);
3142  global_handles_->IterateNewSpaceRoots(&updating_visitor, start_, end_);
3143  }
3144 
3145  private:
3146  Heap* heap_;
3147  GlobalHandles* global_handles_;
3148  size_t start_;
3149  size_t end_;
3150 };
3151 
3152 // Update array buffers on a page that has been evacuated by copying objects.
3153 // Target page exclusivity in old space is guaranteed by the fact that
3154 // evacuation tasks either (a) retrieved a fresh page, or (b) retrieved all
3155 // free list items of a given page. For new space the tracker will update
3156 // using a lock.
3157 class ArrayBufferTrackerUpdatingItem : public UpdatingItem {
3158  public:
3159  enum EvacuationState { kRegular, kAborted };
3160 
3161  explicit ArrayBufferTrackerUpdatingItem(Page* page, EvacuationState state)
3162  : page_(page), state_(state) {}
3163  ~ArrayBufferTrackerUpdatingItem() override = default;
3164 
3165  void Process() override {
3166  TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
3167  "ArrayBufferTrackerUpdatingItem::Process", "EvacuationState",
3168  state_);
3169  switch (state_) {
3170  case EvacuationState::kRegular:
3171  ArrayBufferTracker::ProcessBuffers(
3172  page_, ArrayBufferTracker::kUpdateForwardedRemoveOthers);
3173  break;
3174  case EvacuationState::kAborted:
3175  ArrayBufferTracker::ProcessBuffers(
3176  page_, ArrayBufferTracker::kUpdateForwardedKeepOthers);
3177  break;
3178  }
3179  }
3180 
3181  private:
3182  Page* const page_;
3183  const EvacuationState state_;
3184 };
3185 
3186 int MarkCompactCollectorBase::CollectToSpaceUpdatingItems(
3187  ItemParallelJob* job) {
3188  // Seed to space pages.
3189  const Address space_start = heap()->new_space()->first_allocatable_address();
3190  const Address space_end = heap()->new_space()->top();
3191  int pages = 0;
3192  for (Page* page : PageRange(space_start, space_end)) {
3193  Address start =
3194  page->Contains(space_start) ? space_start : page->area_start();
3195  Address end = page->Contains(space_end) ? space_end : page->area_end();
3196  job->AddItem(CreateToSpaceUpdatingItem(page, start, end));
3197  pages++;
3198  }
3199  if (pages == 0) return 0;
3200  return NumberOfParallelToSpacePointerUpdateTasks(pages);
3201 }
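
// Illustrative sketch (not part of mark-compact.cc): the collector seeds one
// updating item per page and then derives a bounded task count from the page
// count, as NumberOfParallelMarkingTasks further down does with an explicit
// pages-per-task heuristic. The helper below shows that shape with invented
// names and assumed parameters.
#include <algorithm>

// Returns how many parallel tasks to spawn for |pages| work items, assuming
// roughly |pages_per_task| pages saturate one task and at most
// |available_cores| tasks are useful; zero pages means zero tasks.
int SketchNumberOfUpdateTasks(int pages, int pages_per_task,
                              int available_cores) {
  if (pages == 0) return 0;
  const int wanted_tasks = std::max(1, pages / pages_per_task);
  return std::min(wanted_tasks, available_cores);
}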
3202 
3203 template <typename IterateableSpace>
3204 int MarkCompactCollectorBase::CollectRememberedSetUpdatingItems(
3205  ItemParallelJob* job, IterateableSpace* space,
3206  RememberedSetUpdatingMode mode) {
3207  int pages = 0;
3208  for (MemoryChunk* chunk : *space) {
3209  const bool contains_old_to_old_slots =
3210  chunk->slot_set<OLD_TO_OLD>() != nullptr ||
3211  chunk->typed_slot_set<OLD_TO_OLD>() != nullptr;
3212  const bool contains_old_to_new_slots =
3213  chunk->slot_set<OLD_TO_NEW>() != nullptr ||
3214  chunk->typed_slot_set<OLD_TO_NEW>() != nullptr;
3215  const bool contains_invalidated_slots =
3216  chunk->invalidated_slots() != nullptr;
3217  if (!contains_old_to_new_slots && !contains_old_to_old_slots &&
3218  !contains_invalidated_slots)
3219  continue;
3220  if (mode == RememberedSetUpdatingMode::ALL || contains_old_to_new_slots ||
3221  contains_invalidated_slots) {
3222  job->AddItem(CreateRememberedSetUpdatingItem(chunk, mode));
3223  pages++;
3224  }
3225  }
3226  return pages;
3227 }
3228 
3229 int MarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
3230  ItemParallelJob* job) {
3231  int pages = 0;
3232  for (Page* p : new_space_evacuation_pages_) {
3233  if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
3234  if (p->local_tracker() == nullptr) continue;
3235 
3236  pages++;
3237  job->AddItem(new ArrayBufferTrackerUpdatingItem(
3238  p, ArrayBufferTrackerUpdatingItem::kRegular));
3239  }
3240  }
3241  return pages;
3242 }
3243 
3244 int MarkCompactCollector::CollectOldSpaceArrayBufferTrackerItems(
3245  ItemParallelJob* job) {
3246  int pages = 0;
3247  for (Page* p : old_space_evacuation_pages_) {
3248  if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsOldToOld &&
3249  p->IsEvacuationCandidate()) {
3250  if (p->local_tracker() == nullptr) continue;
3251 
3252  pages++;
3253  job->AddItem(new ArrayBufferTrackerUpdatingItem(
3254  p, ArrayBufferTrackerUpdatingItem::kRegular));
3255  }
3256  }
3257  for (auto object_and_page : aborted_evacuation_candidates_) {
3258  Page* p = object_and_page.second;
3259  if (p->local_tracker() == nullptr) continue;
3260 
3261  pages++;
3262  job->AddItem(new ArrayBufferTrackerUpdatingItem(
3263  p, ArrayBufferTrackerUpdatingItem::kAborted));
3264  }
3265  return pages;
3266 }
3267 
3268 void MarkCompactCollector::UpdatePointersAfterEvacuation() {
3269  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS);
3270 
3271  PointersUpdatingVisitor updating_visitor(heap());
3272 
3273  {
3274  TRACE_GC(heap()->tracer(),
3275  GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3276  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3277  }
3278 
3279  {
3280  TRACE_GC(heap()->tracer(),
3281  GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAIN);
3282  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3283  &page_parallel_job_semaphore_);
3284 
3285  int remembered_set_pages = 0;
3286  remembered_set_pages += CollectRememberedSetUpdatingItems(
3287  &updating_job, heap()->old_space(), RememberedSetUpdatingMode::ALL);
3288  remembered_set_pages += CollectRememberedSetUpdatingItems(
3289  &updating_job, heap()->code_space(), RememberedSetUpdatingMode::ALL);
3290  remembered_set_pages += CollectRememberedSetUpdatingItems(
3291  &updating_job, heap()->lo_space(), RememberedSetUpdatingMode::ALL);
3292  remembered_set_pages += CollectRememberedSetUpdatingItems(
3293  &updating_job, heap()->code_lo_space(), RememberedSetUpdatingMode::ALL);
3294  const int remembered_set_tasks =
3295  remembered_set_pages == 0
3296  ? 0
3297  : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3298  old_to_new_slots_);
3299  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3300  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3301  for (int i = 0; i < num_tasks; i++) {
3302  updating_job.AddTask(new PointersUpdatingTask(
3303  isolate(),
3304  GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3305  }
3306  updating_job.Run(isolate()->async_counters());
3307  }
3308 
3309  {
3310  // - Update pointers in map space in a separate phase to avoid data races
3311  // with Map->LayoutDescriptor edge.
3312  // - Update array buffer trackers in the second phase to have access to
3313  // byte length which is potentially a HeapNumber.
3314  TRACE_GC(heap()->tracer(),
3315  GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_SLOTS_MAP_SPACE);
3316  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3317  &page_parallel_job_semaphore_);
3318 
3319  int array_buffer_pages = 0;
3320  array_buffer_pages += CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3321  array_buffer_pages += CollectOldSpaceArrayBufferTrackerItems(&updating_job);
3322 
3323  int remembered_set_pages = 0;
3324  remembered_set_pages += CollectRememberedSetUpdatingItems(
3325  &updating_job, heap()->map_space(), RememberedSetUpdatingMode::ALL);
3326  const int remembered_set_tasks =
3327  remembered_set_pages == 0
3328  ? 0
3329  : NumberOfParallelPointerUpdateTasks(remembered_set_pages,
3330  old_to_new_slots_);
3331  const int num_tasks = Max(array_buffer_pages, remembered_set_tasks);
3332  if (num_tasks > 0) {
3333  for (int i = 0; i < num_tasks; i++) {
3334  updating_job.AddTask(new PointersUpdatingTask(
3335  isolate(),
3336  GCTracer::BackgroundScope::MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3337  }
3338  updating_job.Run(isolate()->async_counters());
3339  heap()->array_buffer_collector()->FreeAllocations();
3340  }
3341  }
3342 
3343  {
3344  TRACE_GC(heap()->tracer(),
3345  GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_WEAK);
3346  // Update pointers from external string table.
3347  heap_->UpdateReferencesInExternalStringTable(
3348  &UpdateReferenceInExternalStringTableEntry);
3349 
3350  EvacuationWeakObjectRetainer evacuation_object_retainer;
3351  heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3352  }
3353 }
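
// Illustrative sketch (not part of mark-compact.cc): UpdatePointersAfterEvacuation
// is built on the item-parallel-job pattern from src/heap/item-parallel-job.h --
// a shared list of independent work items that a fixed number of tasks drain
// concurrently, with the calling thread participating. The standalone sketch
// below reproduces only that pattern with std::thread and an atomic cursor;
// names and signatures are invented for the example.
#include <atomic>
#include <cstddef>
#include <functional>
#include <thread>
#include <vector>

// Runs every item exactly once, distributing the items dynamically over
// |num_tasks| workers (the calling thread counts as one of them).
void SketchRunItemParallelJob(const std::vector<std::function<void()>>& items,
                              int num_tasks) {
  std::atomic<size_t> next{0};
  auto worker = [&items, &next]() {
    for (size_t i = next.fetch_add(1); i < items.size();
         i = next.fetch_add(1)) {
      items[i]();  // Process one independent item, e.g. one page's slots.
    }
  };
  std::vector<std::thread> background;
  for (int t = 1; t < num_tasks; ++t) background.emplace_back(worker);
  worker();  // The calling thread helps drain the item list.
  for (std::thread& thread : background) thread.join();
}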
3354 
3355 void MarkCompactCollector::ReportAbortedEvacuationCandidate(
3356  HeapObject* failed_object, MemoryChunk* chunk) {
3357  base::MutexGuard guard(&mutex_);
3358 
3359  aborted_evacuation_candidates_.push_back(
3360  std::make_pair(failed_object, static_cast<Page*>(chunk)));
3361 }
3362 
3363 void MarkCompactCollector::PostProcessEvacuationCandidates() {
3364  for (auto object_and_page : aborted_evacuation_candidates_) {
3365  HeapObject* failed_object = object_and_page.first;
3366  Page* page = object_and_page.second;
3367  page->SetFlag(Page::COMPACTION_WAS_ABORTED);
3368  // Aborted compaction page. We have to record slots here, since we
3369  // might not have recorded them in the first place.
3370 
3371  // Remove outdated slots.
3372  RememberedSet<OLD_TO_NEW>::RemoveRange(page, page->address(),
3373  failed_object->address(),
3374  SlotSet::PREFREE_EMPTY_BUCKETS);
3375  RememberedSet<OLD_TO_NEW>::RemoveRangeTyped(page, page->address(),
3376  failed_object->address());
3377  // Recompute live bytes.
3378  LiveObjectVisitor::RecomputeLiveBytes(page, non_atomic_marking_state());
3379  // Re-record slots.
3380  EvacuateRecordOnlyVisitor record_visitor(heap());
3381  LiveObjectVisitor::VisitBlackObjectsNoFail(page, non_atomic_marking_state(),
3382  &record_visitor,
3383  LiveObjectVisitor::kKeepMarking);
3384  // Array buffers will be processed during pointer updating.
3385  }
3386  const int aborted_pages =
3387  static_cast<int>(aborted_evacuation_candidates_.size());
3388  int aborted_pages_verified = 0;
3389  for (Page* p : old_space_evacuation_pages_) {
3390  if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3391  // After clearing the evacuation candidate flag the page is again in a
3392  // regular state.
3393  p->ClearEvacuationCandidate();
3394  aborted_pages_verified++;
3395  } else {
3396  DCHECK(p->IsEvacuationCandidate());
3397  DCHECK(p->SweepingDone());
3398  p->owner()->memory_chunk_list().Remove(p);
3399  }
3400  }
3401  DCHECK_EQ(aborted_pages_verified, aborted_pages);
3402  if (FLAG_trace_evacuation && (aborted_pages > 0)) {
3403  PrintIsolate(isolate(), "%8.0f ms: evacuation: aborted=%d\n",
3404  isolate()->time_millis_since_init(), aborted_pages);
3405  }
3406 }
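
// Illustrative sketch (not part of mark-compact.cc): for an aborted evacuation
// candidate the collector above removes every recorded slot below the object
// at which compaction failed, because that region is re-recorded from scratch
// afterwards. The helper below drops the entries of a plain address vector
// that fall into [range_start, range_end); the names are invented for the
// example.
#include <algorithm>
#include <cstdint>
#include <vector>

void SketchRemoveSlotRange(std::vector<uintptr_t>* slots,
                           uintptr_t range_start, uintptr_t range_end) {
  slots->erase(std::remove_if(slots->begin(), slots->end(),
                              [=](uintptr_t slot) {
                                return slot >= range_start && slot < range_end;
                              }),
               slots->end());
}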
3407 
3408 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3409  for (Page* p : old_space_evacuation_pages_) {
3410  if (!p->IsEvacuationCandidate()) continue;
3411  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3412  non_atomic_marking_state()->SetLiveBytes(p, 0);
3413  CHECK(p->SweepingDone());
3414  space->ReleasePage(p);
3415  }
3416  old_space_evacuation_pages_.clear();
3417  compacting_ = false;
3418 }
3419 
3420 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3421  space->ClearStats();
3422 
3423  int will_be_swept = 0;
3424  bool unused_page_present = false;
3425 
3426  // Loop needs to support deletion if live bytes == 0 for a page.
3427  for (auto it = space->begin(); it != space->end();) {
3428  Page* p = *(it++);
3429  DCHECK(p->SweepingDone());
3430 
3431  if (p->IsEvacuationCandidate()) {
3432  // Will be processed in Evacuate.
3433  DCHECK(!evacuation_candidates_.empty());
3434  continue;
3435  }
3436 
3437  if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3438  // We need to sweep the page to get it into an iterable state again. Note
3439  // that this adds unusable memory to the free list, which is later
3440  // dropped from the free list again. Since we only use the flag for
3441  // testing, this is fine.
3442  p->set_concurrent_sweeping_state(Page::kSweepingInProgress);
3443  sweeper()->RawSweep(p, Sweeper::IGNORE_FREE_LIST,
3444  Heap::ShouldZapGarbage()
3445  ? FreeSpaceTreatmentMode::ZAP_FREE_SPACE
3446  : FreeSpaceTreatmentMode::IGNORE_FREE_SPACE);
3447  space->IncreaseAllocatedBytes(p->allocated_bytes(), p);
3448  continue;
3449  }
3450 
3451  // One unused page is kept; all further unused pages are released instead of being swept.
3452  if (non_atomic_marking_state()->live_bytes(p) == 0) {
3453  if (unused_page_present) {
3454  if (FLAG_gc_verbose) {
3455  PrintIsolate(isolate(), "sweeping: released page: %p",
3456  static_cast<void*>(p));
3457  }
3458  ArrayBufferTracker::FreeAll(p);
3459  space->memory_chunk_list().Remove(p);
3460  space->ReleasePage(p);
3461  continue;
3462  }
3463  unused_page_present = true;
3464  }
3465 
3466  sweeper()->AddPage(space->identity(), p, Sweeper::REGULAR);
3467  will_be_swept++;
3468  }
3469 
3470  if (FLAG_gc_verbose) {
3471  PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3472  space->name(), will_be_swept);
3473  }
3474 }
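
// Illustrative sketch (not part of mark-compact.cc): StartSweepSpace keeps at
// most one completely empty page for future allocation and releases every
// further empty page instead of queuing it for sweeping. The standalone loop
// below applies the same policy to a vector of per-page live-byte counts;
// names are invented for the example.
#include <cstddef>
#include <vector>

// Keeps the first empty page (it still gets swept), drops all further empty
// pages, and returns how many pages remain queued for sweeping.
size_t SketchSelectPagesForSweeping(std::vector<size_t>* live_bytes_per_page) {
  std::vector<size_t> kept;
  bool unused_page_present = false;
  for (size_t live_bytes : *live_bytes_per_page) {
    if (live_bytes == 0) {
      if (unused_page_present) continue;  // Release all further empty pages.
      unused_page_present = true;
    }
    kept.push_back(live_bytes);
  }
  live_bytes_per_page->swap(kept);
  return live_bytes_per_page->size();
}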
3475 
3476 void MarkCompactCollector::StartSweepSpaces() {
3477  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
3478 #ifdef DEBUG
3479  state_ = SWEEP_SPACES;
3480 #endif
3481 
3482  {
3483  {
3484  GCTracer::Scope sweep_scope(heap()->tracer(),
3485  GCTracer::Scope::MC_SWEEP_OLD);
3486  StartSweepSpace(heap()->old_space());
3487  }
3488  {
3489  GCTracer::Scope sweep_scope(heap()->tracer(),
3490  GCTracer::Scope::MC_SWEEP_CODE);
3491  StartSweepSpace(heap()->code_space());
3492  }
3493  {
3494  GCTracer::Scope sweep_scope(heap()->tracer(),
3495  GCTracer::Scope::MC_SWEEP_MAP);
3496  StartSweepSpace(heap()->map_space());
3497  }
3498  sweeper()->StartSweeping();
3499  }
3500 }
3501 
3502 void MarkCompactCollector::MarkingWorklist::PrintWorklist(
3503  const char* worklist_name, ConcurrentMarkingWorklist* worklist) {
3504  std::map<InstanceType, int> count;
3505  int total_count = 0;
3506  worklist->IterateGlobalPool([&count, &total_count](HeapObject* obj) {
3507  ++total_count;
3508  count[obj->map()->instance_type()]++;
3509  });
3510  std::vector<std::pair<int, InstanceType>> rank;
3511  rank.reserve(count.size());
3512  for (const auto& i : count) {
3513  rank.emplace_back(i.second, i.first);
3514  }
3515  std::map<InstanceType, std::string> instance_type_name;
3516 #define INSTANCE_TYPE_NAME(name) instance_type_name[name] = #name;
3517  INSTANCE_TYPE_LIST(INSTANCE_TYPE_NAME)
3518 #undef INSTANCE_TYPE_NAME
3519  std::sort(rank.begin(), rank.end(),
3520  std::greater<std::pair<int, InstanceType>>());
3521  PrintF("Worklist %s: %d\n", worklist_name, total_count);
3522  for (auto i : rank) {
3523  PrintF(" [%s]: %d\n", instance_type_name[i.second].c_str(), i.first);
3524  }
3525 }
3526 
3527 #ifdef ENABLE_MINOR_MC
3528 
3529 namespace {
3530 
3531 #ifdef VERIFY_HEAP
3532 
3533 class YoungGenerationMarkingVerifier : public MarkingVerifier {
3534  public:
3535  explicit YoungGenerationMarkingVerifier(Heap* heap)
3536  : MarkingVerifier(heap),
3537  marking_state_(
3538  heap->minor_mark_compact_collector()->non_atomic_marking_state()) {}
3539 
3540  Bitmap* bitmap(const MemoryChunk* chunk) override {
3541  return marking_state_->bitmap(chunk);
3542  }
3543 
3544  bool IsMarked(HeapObject* object) override {
3545  return marking_state_->IsGrey(object);
3546  }
3547 
3548  bool IsBlackOrGrey(HeapObject* object) override {
3549  return marking_state_->IsBlackOrGrey(object);
3550  }
3551 
3552  void Run() override {
3553  VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3554  VerifyMarking(heap_->new_space());
3555  }
3556 
3557  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3558  for (ObjectSlot current = start; current < end; ++current) {
3559  DCHECK(!HasWeakHeapObjectTag(*current));
3560  if ((*current)->IsHeapObject()) {
3561  HeapObject* object = HeapObject::cast(*current);
3562  if (!Heap::InNewSpace(object)) return;
3563  CHECK(IsMarked(object));
3564  }
3565  }
3566  }
3567 
3568  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3569  for (MaybeObjectSlot current = start; current < end; ++current) {
3570  HeapObject* object;
3571  // Minor MC treats weak references as strong.
3572  if ((*current)->GetHeapObject(&object)) {
3573  if (!Heap::InNewSpace(object)) {
3574  continue;
3575  }
3576  CHECK(IsMarked(object));
3577  }
3578  }
3579  }
3580 
3581  private:
3582  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
3583 };
3584 
3585 class YoungGenerationEvacuationVerifier : public EvacuationVerifier {
3586  public:
3587  explicit YoungGenerationEvacuationVerifier(Heap* heap)
3588  : EvacuationVerifier(heap) {}
3589 
3590  void Run() override {
3591  VerifyRoots(VISIT_ALL_IN_SCAVENGE);
3592  VerifyEvacuation(heap_->new_space());
3593  VerifyEvacuation(heap_->old_space());
3594  VerifyEvacuation(heap_->code_space());
3595  VerifyEvacuation(heap_->map_space());
3596  }
3597 
3598  protected:
3599  void VerifyPointers(ObjectSlot start, ObjectSlot end) override {
3600  for (ObjectSlot current = start; current < end; ++current) {
3601  if ((*current)->IsHeapObject()) {
3602  HeapObject* object = HeapObject::cast(*current);
3603  CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
3604  }
3605  }
3606  }
3607  void VerifyPointers(MaybeObjectSlot start, MaybeObjectSlot end) override {
3608  for (MaybeObjectSlot current = start; current < end; ++current) {
3609  HeapObject* object;
3610  if ((*current)->GetHeapObject(&object)) {
3611  CHECK_IMPLIES(Heap::InNewSpace(object), Heap::InToSpace(object));
3612  }
3613  }
3614  }
3615 };
3616 
3617 #endif // VERIFY_HEAP
3618 
3619 template <class ParallelItem>
3620 void SeedGlobalHandles(Heap* heap, GlobalHandles* global_handles,
3621  ItemParallelJob* job) {
3622  // Create batches of global handles.
3623  const size_t kGlobalHandlesBufferSize = 1000;
3624  const size_t new_space_nodes = global_handles->NumberOfNewSpaceNodes();
3625  for (size_t start = 0; start < new_space_nodes;
3626  start += kGlobalHandlesBufferSize) {
3627  size_t end = start + kGlobalHandlesBufferSize;
3628  if (end > new_space_nodes) end = new_space_nodes;
3629  job->AddItem(new ParallelItem(heap, global_handles, start, end));
3630  }
3631 }
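
// Illustrative sketch (not part of mark-compact.cc): SeedGlobalHandles above
// cuts the new-space global-handle nodes into fixed-size [start, end) batches
// so that each batch becomes one parallel work item. The helper below performs
// the same chunking over a plain count; the callback and names are assumptions
// made for the example.
#include <cstddef>
#include <functional>

void SketchSeedInBatches(size_t node_count, size_t batch_size,
                         const std::function<void(size_t, size_t)>& add_item) {
  for (size_t start = 0; start < node_count; start += batch_size) {
    size_t end = start + batch_size;
    if (end > node_count) end = node_count;
    add_item(start, end);  // One work item covers the nodes in [start, end).
  }
}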
3632 
3633 bool IsUnmarkedObjectForYoungGeneration(Heap* heap, ObjectSlot p) {
3634  DCHECK_IMPLIES(Heap::InNewSpace(*p), Heap::InToSpace(*p));
3635  return Heap::InNewSpace(*p) && !heap->minor_mark_compact_collector()
3636  ->non_atomic_marking_state()
3637  ->IsGrey(HeapObject::cast(*p));
3638 }
3639 
3640 } // namespace
3641 
3642 class YoungGenerationMarkingVisitor final
3643  : public NewSpaceVisitor<YoungGenerationMarkingVisitor> {
3644  public:
3645  YoungGenerationMarkingVisitor(
3646  MinorMarkCompactCollector::MarkingState* marking_state,
3647  MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
3648  : worklist_(global_worklist, task_id), marking_state_(marking_state) {}
3649 
3650  V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
3651  ObjectSlot end) final {
3652  for (ObjectSlot p = start; p < end; ++p) {
3653  VisitPointer(host, p);
3654  }
3655  }
3656 
3657  V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
3658  MaybeObjectSlot end) final {
3659  for (MaybeObjectSlot p = start; p < end; ++p) {
3660  VisitPointer(host, p);
3661  }
3662  }
3663 
3664  V8_INLINE void VisitPointer(HeapObject* host, ObjectSlot slot) final {
3665  Object* target = *slot;
3666  DCHECK(!HasWeakHeapObjectTag(target));
3667  if (Heap::InNewSpace(target)) {
3668  HeapObject* target_object = HeapObject::cast(target);
3669  MarkObjectViaMarkingWorklist(target_object);
3670  }
3671  }
3672 
3673  V8_INLINE void VisitPointer(HeapObject* host, MaybeObjectSlot slot) final {
3674  MaybeObject target = *slot;
3675  if (Heap::InNewSpace(target)) {
3676  HeapObject* target_object;
3677  // Treat weak references as strong. TODO(marja): Proper weakness handling
3678  // for minor-mcs.
3679  if (target->GetHeapObject(&target_object)) {
3680  MarkObjectViaMarkingWorklist(target_object);
3681  }
3682  }
3683  }
3684 
3685  private:
3686  inline void MarkObjectViaMarkingWorklist(HeapObject* object) {
3687  if (marking_state_->WhiteToGrey(object)) {
3688  // Marking deque overflow is unsupported for the young generation.
3689  CHECK(worklist_.Push(object));
3690  }
3691  }
3692 
3693  MinorMarkCompactCollector::MarkingWorklist::View worklist_;
3694  MinorMarkCompactCollector::MarkingState* marking_state_;
3695 };
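
// Illustrative sketch (not part of mark-compact.cc): the visitor above performs
// the white-to-grey transition and pushes newly grey objects onto a worklist;
// young generation marking only goes to grey, so each object is pushed and
// visited at most once. The self-contained example below runs that scheme over
// a toy object graph; SketchNode and its edges are invented for the
// illustration.
#include <deque>
#include <vector>

struct SketchNode {
  bool grey = false;  // Minor MC marks young objects grey only, never black.
  std::vector<SketchNode*> edges;
};

// Marks everything reachable from |roots| using a grey worklist, mirroring
// MarkObjectViaMarkingWorklist followed by draining the marking worklist.
void SketchMarkReachable(const std::vector<SketchNode*>& roots) {
  std::deque<SketchNode*> worklist;
  auto mark = [&worklist](SketchNode* object) {
    if (!object->grey) {           // The white-to-grey transition...
      object->grey = true;
      worklist.push_back(object);  // ...is what enqueues the object.
    }
  };
  for (SketchNode* root : roots) mark(root);
  while (!worklist.empty()) {
    SketchNode* object = worklist.front();
    worklist.pop_front();
    for (SketchNode* target : object->edges) mark(target);
  }
}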
3696 
3697 void MinorMarkCompactCollector::SetUp() {}
3698 
3699 void MinorMarkCompactCollector::TearDown() {}
3700 
3701 MinorMarkCompactCollector::MinorMarkCompactCollector(Heap* heap)
3702  : MarkCompactCollectorBase(heap),
3703  worklist_(new MinorMarkCompactCollector::MarkingWorklist()),
3704  main_marking_visitor_(new YoungGenerationMarkingVisitor(
3705  marking_state(), worklist_, kMainMarker)),
3706  page_parallel_job_semaphore_(0) {
3707  static_assert(
3708  kNumMarkers <= MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks,
3709  "more marker tasks than marking deque can handle");
3710 }
3711 
3712 MinorMarkCompactCollector::~MinorMarkCompactCollector() {
3713  delete worklist_;
3714  delete main_marking_visitor_;
3715 }
3716 
3717 int MinorMarkCompactCollector::NumberOfParallelMarkingTasks(int pages) {
3718  DCHECK_GT(pages, 0);
3719  if (!FLAG_minor_mc_parallel_marking) return 1;
3720  // Pages are not private to markers but we can still use them to estimate the
3721  // amount of marking that is required.
3722  const int kPagesPerTask = 2;
3723  const int wanted_tasks = Max(1, pages / kPagesPerTask);
3724  return Min(NumberOfAvailableCores(),
3725  Min(wanted_tasks,
3726  MinorMarkCompactCollector::MarkingWorklist::kMaxNumTasks));
3727 }
3728 
3729 void MinorMarkCompactCollector::CleanupSweepToIteratePages() {
3730  for (Page* p : sweep_to_iterate_pages_) {
3731  if (p->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
3732  p->ClearFlag(Page::SWEEP_TO_ITERATE);
3733  non_atomic_marking_state()->ClearLiveness(p);
3734  }
3735  }
3736  sweep_to_iterate_pages_.clear();
3737 }
3738 
3739 class YoungGenerationMigrationObserver final : public MigrationObserver {
3740  public:
3741  YoungGenerationMigrationObserver(Heap* heap,
3742  MarkCompactCollector* mark_compact_collector)
3743  : MigrationObserver(heap),
3744  mark_compact_collector_(mark_compact_collector) {}
3745 
3746  inline void Move(AllocationSpace dest, HeapObject* src, HeapObject* dst,
3747  int size) final {
3748  // Migrate color to old generation marking in case the object survived young
3749  // generation garbage collection.
3750  if (heap_->incremental_marking()->IsMarking()) {
3751  DCHECK(
3752  heap_->incremental_marking()->atomic_marking_state()->IsWhite(dst));
3753  heap_->incremental_marking()->TransferColor(src, dst);
3754  }
3755  }
3756 
3757  protected:
3758  base::Mutex mutex_;
3759  MarkCompactCollector* mark_compact_collector_;
3760 };
3761 
3762 class YoungGenerationRecordMigratedSlotVisitor final
3763  : public RecordMigratedSlotVisitor {
3764  public:
3765  explicit YoungGenerationRecordMigratedSlotVisitor(
3766  MarkCompactCollector* collector)
3767  : RecordMigratedSlotVisitor(collector) {}
3768 
3769  void VisitCodeTarget(Code host, RelocInfo* rinfo) final { UNREACHABLE(); }
3770  void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final {
3771  UNREACHABLE();
3772  }
3773 
3774  private:
3775  // Only record slots for host objects that are considered live by the full
3776  // collector.
3777  inline bool IsLive(HeapObject* object) {
3778  return collector_->non_atomic_marking_state()->IsBlack(object);
3779  }
3780 
3781  inline void RecordMigratedSlot(HeapObject* host, MaybeObject value,
3782  Address slot) final {
3783  if (value->IsStrongOrWeak()) {
3784  Page* p = Page::FromAddress(value.ptr());
3785  if (p->InNewSpace()) {
3786  DCHECK_IMPLIES(p->InToSpace(),
3787  p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION));
3788  RememberedSet<OLD_TO_NEW>::Insert<AccessMode::NON_ATOMIC>(
3789  Page::FromAddress(slot), slot);
3790  } else if (p->IsEvacuationCandidate() && IsLive(host)) {
3791  RememberedSet<OLD_TO_OLD>::Insert<AccessMode::NON_ATOMIC>(
3792  Page::FromAddress(slot), slot);
3793  }
3794  }
3795  }
3796 };
3797 
3798 void MinorMarkCompactCollector::UpdatePointersAfterEvacuation() {
3799  TRACE_GC(heap()->tracer(),
3800  GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS);
3801 
3802  PointersUpdatingVisitor updating_visitor(heap());
3803  ItemParallelJob updating_job(isolate()->cancelable_task_manager(),
3804  &page_parallel_job_semaphore_);
3805 
3806  CollectNewSpaceArrayBufferTrackerItems(&updating_job);
3807  // Create batches of global handles.
3808  SeedGlobalHandles<GlobalHandlesUpdatingItem>(
3809  heap(), isolate()->global_handles(), &updating_job);
3810  const int to_space_tasks = CollectToSpaceUpdatingItems(&updating_job);
3811  int remembered_set_pages = 0;
3812  remembered_set_pages += CollectRememberedSetUpdatingItems(
3813  &updating_job, heap()->old_space(),
3814  RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3815  remembered_set_pages += CollectRememberedSetUpdatingItems(
3816  &updating_job, heap()->code_space(),
3817  RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3818  remembered_set_pages += CollectRememberedSetUpdatingItems(
3819  &updating_job, heap()->map_space(),
3820  RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3821  remembered_set_pages += CollectRememberedSetUpdatingItems(
3822  &updating_job, heap()->lo_space(),
3823  RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3824  remembered_set_pages += CollectRememberedSetUpdatingItems(
3825  &updating_job, heap()->code_lo_space(),
3826  RememberedSetUpdatingMode::OLD_TO_NEW_ONLY);
3827  const int remembered_set_tasks =
3828  remembered_set_pages == 0 ? 0
3829  : NumberOfParallelPointerUpdateTasks(
3830  remembered_set_pages, old_to_new_slots_);
3831  const int num_tasks = Max(to_space_tasks, remembered_set_tasks);
3832  for (int i = 0; i < num_tasks; i++) {
3833  updating_job.AddTask(new PointersUpdatingTask(
3834  isolate(), GCTracer::BackgroundScope::
3835  MINOR_MC_BACKGROUND_EVACUATE_UPDATE_POINTERS));
3836  }
3837 
3838  {
3839  TRACE_GC(heap()->tracer(),
3840  GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_TO_NEW_ROOTS);
3841  heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_MINOR_MC_UPDATE);
3842  }
3843  {
3844  TRACE_GC(heap()->tracer(),
3845  GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_SLOTS);
3846  updating_job.Run(isolate()->async_counters());
3847  heap()->array_buffer_collector()->FreeAllocations();
3848  }
3849 
3850  {
3851  TRACE_GC(heap()->tracer(),
3852  GCTracer::Scope::MINOR_MC_EVACUATE_UPDATE_POINTERS_WEAK);
3853 
3854  EvacuationWeakObjectRetainer evacuation_object_retainer;
3855  heap()->ProcessWeakListRoots(&evacuation_object_retainer);
3856 
3857  // Update pointers from external string table.
3858  heap()->UpdateNewSpaceReferencesInExternalStringTable(
3859  &UpdateReferenceInExternalStringTableEntry);
3860  }
3861 }
3862 
3863 class MinorMarkCompactCollector::RootMarkingVisitor : public RootVisitor {
3864  public:
3865  explicit RootMarkingVisitor(MinorMarkCompactCollector* collector)
3866  : collector_(collector) {}
3867 
3868  void VisitRootPointer(Root root, const char* description,
3869  ObjectSlot p) final {
3870  MarkObjectByPointer(p);
3871  }
3872 
3873  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
3874  ObjectSlot end) final {
3875  for (ObjectSlot p = start; p < end; ++p) {
3876  MarkObjectByPointer(p);
3877  }
3878  }
3879 
3880  private:
3881  V8_INLINE void MarkObjectByPointer(ObjectSlot p) {
3882  if (!(*p)->IsHeapObject()) return;
3883  collector_->MarkRootObject(HeapObject::cast(*p));
3884  }
3885  MinorMarkCompactCollector* const collector_;
3886 };
3887 
3888 void MinorMarkCompactCollector::CollectGarbage() {
3889  {
3890  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_SWEEPING);
3891  heap()->mark_compact_collector()->sweeper()->EnsureIterabilityCompleted();
3892  CleanupSweepToIteratePages();
3893  }
3894 
3895  MarkLiveObjects();
3896  ClearNonLiveReferences();
3897 #ifdef VERIFY_HEAP
3898  if (FLAG_verify_heap) {
3899  YoungGenerationMarkingVerifier verifier(heap());
3900  verifier.Run();
3901  }
3902 #endif // VERIFY_HEAP
3903 
3904  Evacuate();
3905 #ifdef VERIFY_HEAP
3906  if (FLAG_verify_heap) {
3907  YoungGenerationEvacuationVerifier verifier(heap());
3908  verifier.Run();
3909  }
3910 #endif // VERIFY_HEAP
3911 
3912  {
3913  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARKING_DEQUE);
3914  heap()->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
3915  }
3916 
3917  {
3918  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_RESET_LIVENESS);
3919  for (Page* p :
3920  PageRange(heap()->new_space()->from_space().first_page(), nullptr)) {
3921  DCHECK(!p->IsFlagSet(Page::SWEEP_TO_ITERATE));
3922  non_atomic_marking_state()->ClearLiveness(p);
3923  if (FLAG_concurrent_marking) {
3924  // Ensure that concurrent marker does not track pages that are
3925  // going to be unmapped.
3926  heap()->concurrent_marking()->ClearLiveness(p);
3927  }
3928  }
3929  }
3930 
3931  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
3932  heap(), [](MemoryChunk* chunk) {
3933  if (chunk->SweepingDone()) {
3934  RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
3935  } else {
3936  RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
3937  }
3938  });
3939 
3940  heap()->account_external_memory_concurrently_freed();
3941 }
3942 
3943 void MinorMarkCompactCollector::MakeIterable(
3944  Page* p, MarkingTreatmentMode marking_mode,
3945  FreeSpaceTreatmentMode free_space_mode) {
3946  // We have to clear the full collector's markbits for the areas that we
3947  // remove here.
3948  MarkCompactCollector* full_collector = heap()->mark_compact_collector();
3949  Address free_start = p->area_start();
3950 
3951  for (auto object_and_size :
3952  LiveObjectRange<kGreyObjects>(p, marking_state()->bitmap(p))) {
3953  HeapObject* const object = object_and_size.first;
3954  DCHECK(non_atomic_marking_state()->IsGrey(object));
3955  Address free_end = object->address();
3956  if (free_end != free_start) {
3957  CHECK_GT(free_end, free_start);
3958  size_t size = static_cast<size_t>(free_end - free_start);
3959  full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
3960  p->AddressToMarkbitIndex(free_start),
3961  p->AddressToMarkbitIndex(free_end));
3962  if (free_space_mode == ZAP_FREE_SPACE) {
3963  ZapCode(free_start, size);
3964  }
3965  p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3966  ClearRecordedSlots::kNo);
3967  }
3968  Map map = object->synchronized_map();
3969  int size = object->SizeFromMap(map);
3970  free_start = free_end + size;
3971  }
3972 
3973  if (free_start != p->area_end()) {
3974  CHECK_GT(p->area_end(), free_start);
3975  size_t size = static_cast<size_t>(p->area_end() - free_start);
3976  full_collector->non_atomic_marking_state()->bitmap(p)->ClearRange(
3977  p->AddressToMarkbitIndex(free_start),
3978  p->AddressToMarkbitIndex(p->area_end()));
3979  if (free_space_mode == ZAP_FREE_SPACE) {
3980  ZapCode(free_start, size);
3981  }
3982  p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
3983  ClearRecordedSlots::kNo);
3984  }
3985 
3986  if (marking_mode == MarkingTreatmentMode::CLEAR) {
3987  non_atomic_marking_state()->ClearLiveness(p);
3988  p->ClearFlag(Page::SWEEP_TO_ITERATE);
3989  }
3990 }
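
// Illustrative sketch (not part of mark-compact.cc): MakeIterable walks the
// live (grey) objects of a page in address order and turns every gap between
// them -- including the tail up to the page end -- into filler so that a
// linear walk over the page never runs into stale garbage. The standalone
// helper below only computes those gaps; the addresses, sizes and names are
// assumptions made for the example.
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

// |live_objects| must be sorted by address; each entry is (address, size).
// Returns (start, size) pairs covering every free range of the page.
std::vector<std::pair<uintptr_t, size_t>> SketchComputeFreeRanges(
    uintptr_t area_start, uintptr_t area_end,
    const std::vector<std::pair<uintptr_t, size_t>>& live_objects) {
  std::vector<std::pair<uintptr_t, size_t>> free_ranges;
  uintptr_t free_start = area_start;
  for (const auto& object : live_objects) {
    if (object.first != free_start) {
      free_ranges.emplace_back(free_start, object.first - free_start);
    }
    free_start = object.first + object.second;
  }
  if (free_start != area_end) {
    free_ranges.emplace_back(free_start, area_end - free_start);
  }
  return free_ranges;
}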
3991 
3992 namespace {
3993 
3994 // Helper class for pruning the external string table.
3995 class YoungGenerationExternalStringTableCleaner : public RootVisitor {
3996  public:
3997  YoungGenerationExternalStringTableCleaner(
3998  MinorMarkCompactCollector* collector)
3999  : heap_(collector->heap()),
4000  marking_state_(collector->non_atomic_marking_state()) {}
4001 
4002  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
4003  ObjectSlot end) override {
4004  DCHECK_EQ(static_cast<int>(root),
4005  static_cast<int>(Root::kExternalStringsTable));
4006  // Visit all HeapObject pointers in [start, end).
4007  for (ObjectSlot p = start; p < end; ++p) {
4008  Object* o = *p;
4009  if (o->IsHeapObject()) {
4010  HeapObject* heap_object = HeapObject::cast(o);
4011  if (marking_state_->IsWhite(heap_object)) {
4012  if (o->IsExternalString()) {
4013  heap_->FinalizeExternalString(String::cast(*p));
4014  } else {
4015  // The original external string may have been internalized.
4016  DCHECK(o->IsThinString());
4017  }
4018  // Set the entry to the_hole_value (as deleted).
4019  p.store(ReadOnlyRoots(heap_).the_hole_value());
4020  }
4021  }
4022  }
4023  }
4024 
4025  private:
4026  Heap* heap_;
4027  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4028 };
4029 
4030 // Marked young generation objects and all old generation objects will be
4031 // retained.
4032 class MinorMarkCompactWeakObjectRetainer : public WeakObjectRetainer {
4033  public:
4034  explicit MinorMarkCompactWeakObjectRetainer(
4035  MinorMarkCompactCollector* collector)
4036  : marking_state_(collector->non_atomic_marking_state()) {}
4037 
4038  Object* RetainAs(Object* object) override {
4039  HeapObject* heap_object = HeapObject::cast(object);
4040  if (!Heap::InNewSpace(heap_object)) return object;
4041 
4042  // Young generation marking only marks to grey instead of black.
4043  DCHECK(!marking_state_->IsBlack(heap_object));
4044  if (marking_state_->IsGrey(heap_object)) {
4045  return object;
4046  }
4047  return nullptr;
4048  }
4049 
4050  private:
4051  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state_;
4052 };
4053 
4054 } // namespace
4055 
4056 void MinorMarkCompactCollector::ClearNonLiveReferences() {
4057  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR);
4058 
4059  {
4060  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_STRING_TABLE);
4061  // Internalized strings are always stored in old space, so there is no need
4062  // to clean them here.
4063  YoungGenerationExternalStringTableCleaner external_visitor(this);
4064  heap()->external_string_table_.IterateNewSpaceStrings(&external_visitor);
4065  heap()->external_string_table_.CleanUpNewSpaceStrings();
4066  }
4067 
4068  {
4069  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_CLEAR_WEAK_LISTS);
4070  // Process the weak references.
4071  MinorMarkCompactWeakObjectRetainer retainer(this);
4072  heap()->ProcessYoungWeakReferences(&retainer);
4073  }
4074 }
4075 
4076 void MinorMarkCompactCollector::EvacuatePrologue() {
4077  NewSpace* new_space = heap()->new_space();
4078  // Append the list of new space pages to be processed.
4079  for (Page* p :
4080  PageRange(new_space->first_allocatable_address(), new_space->top())) {
4081  new_space_evacuation_pages_.push_back(p);
4082  }
4083  new_space->Flip();
4084  new_space->ResetLinearAllocationArea();
4085 }
4086 
4087 void MinorMarkCompactCollector::EvacuateEpilogue() {
4088  heap()->new_space()->set_age_mark(heap()->new_space()->top());
4089  // Give pages that are queued to be freed back to the OS.
4090  heap()->memory_allocator()->unmapper()->FreeQueuedChunks();
4091 }
4092 
4093 UpdatingItem* MinorMarkCompactCollector::CreateToSpaceUpdatingItem(
4094  MemoryChunk* chunk, Address start, Address end) {
4095  return new ToSpaceUpdatingItem<NonAtomicMarkingState>(
4096  chunk, start, end, non_atomic_marking_state());
4097 }
4098 
4099 UpdatingItem* MinorMarkCompactCollector::CreateRememberedSetUpdatingItem(
4100  MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) {
4101  return new RememberedSetUpdatingItem<NonAtomicMarkingState>(
4102  heap(), non_atomic_marking_state(), chunk, updating_mode);
4103 }
4104 
4105 class MarkingItem;
4106 class GlobalHandlesMarkingItem;
4107 class PageMarkingItem;
4108 class RootMarkingItem;
4109 class YoungGenerationMarkingTask;
4110 
4111 class MarkingItem : public ItemParallelJob::Item {
4112  public:
4113  ~MarkingItem() override = default;
4114  virtual void Process(YoungGenerationMarkingTask* task) = 0;
4115 };
4116 
4117 class YoungGenerationMarkingTask : public ItemParallelJob::Task {
4118  public:
4119  YoungGenerationMarkingTask(
4120  Isolate* isolate, MinorMarkCompactCollector* collector,
4121  MinorMarkCompactCollector::MarkingWorklist* global_worklist, int task_id)
4122  : ItemParallelJob::Task(isolate),
4123  collector_(collector),
4124  marking_worklist_(global_worklist, task_id),
4125  marking_state_(collector->marking_state()),
4126  visitor_(marking_state_, global_worklist, task_id) {
4127  local_live_bytes_.reserve(isolate->heap()->new_space()->Capacity() /
4128  Page::kPageSize);
4129  }
4130 
4131  void RunInParallel() override {
4132  TRACE_BACKGROUND_GC(collector_->heap()->tracer(),
4133  GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_MARKING);
4134  double marking_time = 0.0;
4135  {
4136  TimedScope scope(&marking_time);
4137  MarkingItem* item = nullptr;
4138  while ((item = GetItem<MarkingItem>()) != nullptr) {
4139  item->Process(this);
4140  item->MarkFinished();
4141  EmptyLocalMarkingWorklist();
4142  }
4143  EmptyMarkingWorklist();
4144  DCHECK(marking_worklist_.IsLocalEmpty());
4145  FlushLiveBytes();
4146  }
4147  if (FLAG_trace_minor_mc_parallel_marking) {
4148  PrintIsolate(collector_->isolate(), "marking[%p]: time=%f\n",
4149  static_cast<void*>(this), marking_time);
4150  }
4151  };
4152 
4153  void MarkObject(Object* object) {
4154  if (!Heap::InNewSpace(object)) return;
4155  HeapObject* heap_object = HeapObject::cast(object);
4156  if (marking_state_->WhiteToGrey(heap_object)) {
4157  const int size = visitor_.Visit(heap_object);
4158  IncrementLiveBytes(heap_object, size);
4159  }
4160  }
4161 
4162  private:
4163  void EmptyLocalMarkingWorklist() {
4164  HeapObject* object = nullptr;
4165  while (marking_worklist_.Pop(&object)) {
4166  const int size = visitor_.Visit(object);
4167  IncrementLiveBytes(object, size);
4168  }
4169  }
4170 
4171  void EmptyMarkingWorklist() {
4172  HeapObject* object = nullptr;
4173  while (marking_worklist_.Pop(&object)) {
4174  const int size = visitor_.Visit(object);
4175  IncrementLiveBytes(object, size);
4176  }
4177  }
4178 
4179  void IncrementLiveBytes(HeapObject* object, intptr_t bytes) {
4180  local_live_bytes_[Page::FromAddress(reinterpret_cast<Address>(object))] +=
4181  bytes;
4182  }
4183 
4184  void FlushLiveBytes() {
4185  for (auto pair : local_live_bytes_) {
4186  marking_state_->IncrementLiveBytes(pair.first, pair.second);
4187  }
4188  }
4189 
4190  MinorMarkCompactCollector* collector_;
4191  MinorMarkCompactCollector::MarkingWorklist::View marking_worklist_;
4192  MinorMarkCompactCollector::MarkingState* marking_state_;
4193  YoungGenerationMarkingVisitor visitor_;
4194  std::unordered_map<Page*, intptr_t, Page::Hasher> local_live_bytes_;
4195 };
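
// Illustrative sketch (not part of mark-compact.cc): the marking task above
// accumulates live bytes in a task-local map and flushes the totals to the
// shared marking state once at the end, instead of contending on shared
// counters for every marked object. The standalone version below uses a
// mutex-protected global map keyed by a plain page id; all names are invented.
#include <cstdint>
#include <mutex>
#include <unordered_map>

struct SketchSharedLiveBytes {
  std::mutex mutex;
  std::unordered_map<int, int64_t> bytes_per_page;
};

class SketchMarkingTask {
 public:
  explicit SketchMarkingTask(SketchSharedLiveBytes* shared) : shared_(shared) {}

  // Called for every object the task marks; touches only task-local state.
  void IncrementLiveBytes(int page_id, int64_t bytes) {
    local_[page_id] += bytes;
  }

  // Called once when the task finishes; a single lock covers the whole flush.
  void FlushLiveBytes() {
    std::lock_guard<std::mutex> guard(shared_->mutex);
    for (const auto& pair : local_) {
      shared_->bytes_per_page[pair.first] += pair.second;
    }
    local_.clear();
  }

 private:
  SketchSharedLiveBytes* shared_;
  std::unordered_map<int, int64_t> local_;
};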
4196 
4197 class PageMarkingItem : public MarkingItem {
4198  public:
4199  explicit PageMarkingItem(MemoryChunk* chunk, std::atomic<int>* global_slots)
4200  : chunk_(chunk), global_slots_(global_slots), slots_(0) {}
4201  ~PageMarkingItem() override { *global_slots_ = *global_slots_ + slots_; }
4202 
4203  void Process(YoungGenerationMarkingTask* task) override {
4204  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4205  "PageMarkingItem::Process");
4206  base::MutexGuard guard(chunk_->mutex());
4207  MarkUntypedPointers(task);
4208  MarkTypedPointers(task);
4209  }
4210 
4211  private:
4212  inline Heap* heap() { return chunk_->heap(); }
4213 
4214  void MarkUntypedPointers(YoungGenerationMarkingTask* task) {
4215  RememberedSet<OLD_TO_NEW>::Iterate(chunk_,
4216  [this, task](MaybeObjectSlot slot) {
4217  return CheckAndMarkObject(task, slot);
4218  },
4219  SlotSet::PREFREE_EMPTY_BUCKETS);
4220  }
4221 
4222  void MarkTypedPointers(YoungGenerationMarkingTask* task) {
4223  RememberedSet<OLD_TO_NEW>::IterateTyped(
4224  chunk_,
4225  [this, task](SlotType slot_type, Address host_addr, Address slot) {
4226  return UpdateTypedSlotHelper::UpdateTypedSlot(
4227  heap(), slot_type, slot, [this, task](MaybeObjectSlot slot) {
4228  return CheckAndMarkObject(task, slot);
4229  });
4230  });
4231  }
4232 
4233  SlotCallbackResult CheckAndMarkObject(YoungGenerationMarkingTask* task,
4234  MaybeObjectSlot slot) {
4235  MaybeObject object = *slot;
4236  if (Heap::InNewSpace(object)) {
4237  // Marking happens before flipping the young generation, so the object
4238  // has to be in ToSpace.
4239  DCHECK(Heap::InToSpace(object));
4240  HeapObject* heap_object;
4241  bool success = object->GetHeapObject(&heap_object);
4242  USE(success);
4243  DCHECK(success);
4244  task->MarkObject(heap_object);
4245  slots_++;
4246  return KEEP_SLOT;
4247  }
4248  return REMOVE_SLOT;
4249  }
4250 
4251  MemoryChunk* chunk_;
4252  std::atomic<int>* global_slots_;
4253  int slots_;
4254 };
4255 
4256 class GlobalHandlesMarkingItem : public MarkingItem {
4257  public:
4258  GlobalHandlesMarkingItem(Heap* heap, GlobalHandles* global_handles,
4259  size_t start, size_t end)
4260  : global_handles_(global_handles), start_(start), end_(end) {}
4261  ~GlobalHandlesMarkingItem() override = default;
4262 
4263  void Process(YoungGenerationMarkingTask* task) override {
4264  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4265  "GlobalHandlesMarkingItem::Process");
4266  GlobalHandlesRootMarkingVisitor visitor(task);
4267  global_handles_
4268  ->IterateNewSpaceStrongAndDependentRootsAndIdentifyUnmodified(
4269  &visitor, start_, end_);
4270  }
4271 
4272  private:
4273  class GlobalHandlesRootMarkingVisitor : public RootVisitor {
4274  public:
4275  explicit GlobalHandlesRootMarkingVisitor(YoungGenerationMarkingTask* task)
4276  : task_(task) {}
4277 
4278  void VisitRootPointer(Root root, const char* description,
4279  ObjectSlot p) override {
4280  DCHECK_EQ(Root::kGlobalHandles, root);
4281  task_->MarkObject(*p);
4282  }
4283 
4284  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
4285  ObjectSlot end) override {
4286  DCHECK_EQ(Root::kGlobalHandles, root);
4287  for (ObjectSlot p = start; p < end; ++p) {
4288  task_->MarkObject(*p);
4289  }
4290  }
4291 
4292  private:
4293  YoungGenerationMarkingTask* task_;
4294  };
4295 
4296  GlobalHandles* global_handles_;
4297  size_t start_;
4298  size_t end_;
4299 };
4300 
4301 void MinorMarkCompactCollector::MarkRootSetInParallel(
4302  RootMarkingVisitor* root_visitor) {
4303  std::atomic<int> slots;
4304  {
4305  ItemParallelJob job(isolate()->cancelable_task_manager(),
4306  &page_parallel_job_semaphore_);
4307 
4308  // Seed the root set (roots + old->new set).
4309  {
4310  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_SEED);
4311  heap()->IterateRoots(root_visitor, VISIT_ALL_IN_MINOR_MC_MARK);
4312  // Create batches of global handles.
4313  SeedGlobalHandles<GlobalHandlesMarkingItem>(
4314  heap(), isolate()->global_handles(), &job);
4315  // Create items for each page.
4316  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
4317  heap(), [&job, &slots](MemoryChunk* chunk) {
4318  job.AddItem(new PageMarkingItem(chunk, &slots));
4319  });
4320  }
4321 
4322  // Add tasks and run in parallel.
4323  {
4324  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_ROOTS);
4325  const int new_space_pages =
4326  static_cast<int>(heap()->new_space()->Capacity()) / Page::kPageSize;
4327  const int num_tasks = NumberOfParallelMarkingTasks(new_space_pages);
4328  for (int i = 0; i < num_tasks; i++) {
4329  job.AddTask(
4330  new YoungGenerationMarkingTask(isolate(), this, worklist(), i));
4331  }
4332  job.Run(isolate()->async_counters());
4333  DCHECK(worklist()->IsEmpty());
4334  }
4335  }
4336  old_to_new_slots_ = slots;
4337 }
4338 
4339 void MinorMarkCompactCollector::MarkLiveObjects() {
4340  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK);
4341 
4342  PostponeInterruptsScope postpone(isolate());
4343 
4344  RootMarkingVisitor root_visitor(this);
4345 
4346  MarkRootSetInParallel(&root_visitor);
4347 
4348  // Mark rest on the main thread.
4349  {
4350  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_WEAK);
4351  ProcessMarkingWorklist();
4352  }
4353 
4354  {
4355  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_MARK_GLOBAL_HANDLES);
4356  isolate()->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
4357  &IsUnmarkedObjectForYoungGeneration);
4358  isolate()
4359  ->global_handles()
4360  ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(&root_visitor);
4361  isolate()
4362  ->global_handles()
4363  ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
4364  &root_visitor, &IsUnmarkedObjectForYoungGeneration);
4365  ProcessMarkingWorklist();
4366  }
4367 }
4368 
4369 void MinorMarkCompactCollector::ProcessMarkingWorklist() {
4370  MarkingWorklist::View marking_worklist(worklist(), kMainMarker);
4371  HeapObject* object = nullptr;
4372  while (marking_worklist.Pop(&object)) {
4373  DCHECK(!object->IsFiller());
4374  DCHECK(object->IsHeapObject());
4375  DCHECK(heap()->Contains(object));
4376  DCHECK(non_atomic_marking_state()->IsGrey(object));
4377  main_marking_visitor()->Visit(object);
4378  }
4379  DCHECK(marking_worklist.IsLocalEmpty());
4380 }
4381 
4382 void MinorMarkCompactCollector::Evacuate() {
4383  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE);
4384  base::MutexGuard guard(heap()->relocation_mutex());
4385 
4386  {
4387  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_PROLOGUE);
4388  EvacuatePrologue();
4389  }
4390 
4391  {
4392  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_COPY);
4393  EvacuatePagesInParallel();
4394  }
4395 
4396  UpdatePointersAfterEvacuation();
4397 
4398  {
4399  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_REBALANCE);
4400  if (!heap()->new_space()->Rebalance()) {
4401  heap()->FatalProcessOutOfMemory("NewSpace::Rebalance");
4402  }
4403  }
4404 
4405  {
4406  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_CLEAN_UP);
4407  for (Page* p : new_space_evacuation_pages_) {
4408  if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION) ||
4409  p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) {
4410  p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION);
4411  p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION);
4412  p->SetFlag(Page::SWEEP_TO_ITERATE);
4413  sweep_to_iterate_pages_.push_back(p);
4414  }
4415  }
4416  new_space_evacuation_pages_.clear();
4417  }
4418 
4419  {
4420  TRACE_GC(heap()->tracer(), GCTracer::Scope::MINOR_MC_EVACUATE_EPILOGUE);
4421  EvacuateEpilogue();
4422  }
4423 }
4424 
4425 namespace {
4426 
4427 class YoungGenerationEvacuator : public Evacuator {
4428  public:
4429  YoungGenerationEvacuator(MinorMarkCompactCollector* collector,
4430  RecordMigratedSlotVisitor* record_visitor)
4431  : Evacuator(collector->heap(), record_visitor), collector_(collector) {}
4432 
4433  GCTracer::BackgroundScope::ScopeId GetBackgroundTracingScope() override {
4434  return GCTracer::BackgroundScope::MINOR_MC_BACKGROUND_EVACUATE_COPY;
4435  }
4436 
4437  protected:
4438  void RawEvacuatePage(MemoryChunk* chunk, intptr_t* live_bytes) override;
4439 
4440  MinorMarkCompactCollector* collector_;
4441 };
4442 
4443 void YoungGenerationEvacuator::RawEvacuatePage(MemoryChunk* chunk,
4444  intptr_t* live_bytes) {
4445  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
4446  "YoungGenerationEvacuator::RawEvacuatePage");
4447  MinorMarkCompactCollector::NonAtomicMarkingState* marking_state =
4448  collector_->non_atomic_marking_state();
4449  *live_bytes = marking_state->live_bytes(chunk);
4450  switch (ComputeEvacuationMode(chunk)) {
4451  case kObjectsNewToOld:
4452  LiveObjectVisitor::VisitGreyObjectsNoFail(
4453  chunk, marking_state, &new_space_visitor_,
4454  LiveObjectVisitor::kClearMarkbits);
4455  // ArrayBufferTracker will be updated during pointers updating.
4456  break;
4457  case kPageNewToOld:
4458  LiveObjectVisitor::VisitGreyObjectsNoFail(
4459  chunk, marking_state, &new_to_old_page_visitor_,
4460  LiveObjectVisitor::kKeepMarking);
4461  new_to_old_page_visitor_.account_moved_bytes(
4462  marking_state->live_bytes(chunk));
4463  if (chunk->owner()->identity() != NEW_LO_SPACE) {
4464  // TODO(mlippautz): If cleaning array buffers is too slow here we can
4465  // delay it until the next GC.
4466  ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4467  if (heap()->ShouldZapGarbage()) {
4468  collector_->MakeIterable(static_cast<Page*>(chunk),
4469  MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4470  } else if (heap()->incremental_marking()->IsMarking()) {
4471  // When incremental marking is on, we need to clear the mark bits of
4472  // the full collector. We cannot yet discard the young generation mark
4473  // bits as they are still relevant for pointers updating.
4474  collector_->MakeIterable(static_cast<Page*>(chunk),
4475  MarkingTreatmentMode::KEEP,
4476  IGNORE_FREE_SPACE);
4477  }
4478  }
4479  break;
4480  case kPageNewToNew:
4481  LiveObjectVisitor::VisitGreyObjectsNoFail(
4482  chunk, marking_state, &new_to_new_page_visitor_,
4483  LiveObjectVisitor::kKeepMarking);
4484  new_to_new_page_visitor_.account_moved_bytes(
4485  marking_state->live_bytes(chunk));
4486  DCHECK_NE(chunk->owner()->identity(), NEW_LO_SPACE);
4487  // TODO(mlippautz): If cleaning array buffers is too slow here we can
4488  // delay it until the next GC.
4489  ArrayBufferTracker::FreeDead(static_cast<Page*>(chunk), marking_state);
4490  if (heap()->ShouldZapGarbage()) {
4491  collector_->MakeIterable(static_cast<Page*>(chunk),
4492  MarkingTreatmentMode::KEEP, ZAP_FREE_SPACE);
4493  } else if (heap()->incremental_marking()->IsMarking()) {
4494  // When incremental marking is on, we need to clear the mark bits of
4495  // the full collector. We cannot yet discard the young generation mark
4496  // bits as they are still relevant for pointers updating.
4497  collector_->MakeIterable(static_cast<Page*>(chunk),
4498  MarkingTreatmentMode::KEEP, IGNORE_FREE_SPACE);
4499  }
4500  break;
4501  case kObjectsOldToOld:
4502  UNREACHABLE();
4503  break;
4504  }
4505 }
4506 
4507 } // namespace
4508 
4509 void MinorMarkCompactCollector::EvacuatePagesInParallel() {
4510  ItemParallelJob evacuation_job(isolate()->cancelable_task_manager(),
4511  &page_parallel_job_semaphore_);
4512  intptr_t live_bytes = 0;
4513 
4514  for (Page* page : new_space_evacuation_pages_) {
4515  intptr_t live_bytes_on_page = non_atomic_marking_state()->live_bytes(page);
4516  if (live_bytes_on_page == 0 && !page->contains_array_buffers()) continue;
4517  live_bytes += live_bytes_on_page;
4518  if (ShouldMovePage(page, live_bytes_on_page)) {
4519  if (page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK)) {
4520  EvacuateNewSpacePageVisitor<NEW_TO_OLD>::Move(page);
4521  } else {
4522  EvacuateNewSpacePageVisitor<NEW_TO_NEW>::Move(page);
4523  }
4524  }
4525  evacuation_job.AddItem(new EvacuationItem(page));
4526  }
4527  if (evacuation_job.NumberOfItems() == 0) return;
4528 
4529  YoungGenerationMigrationObserver observer(heap(),
4530  heap()->mark_compact_collector());
4531  YoungGenerationRecordMigratedSlotVisitor record_visitor(
4532  heap()->mark_compact_collector());
4533  CreateAndExecuteEvacuationTasks<YoungGenerationEvacuator>(
4534  this, &evacuation_job, &record_visitor, &observer, live_bytes);
4535 }
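
// Illustrative sketch (not part of mark-compact.cc): EvacuatePagesInParallel
// either moves a whole new-space page (when enough of it is live) or copies
// the individual live objects off it. The predicate below captures only the
// live-bytes side of that decision with an assumed fraction; the real
// ShouldMovePage heuristic applies its own threshold and further conditions
// not shown here.
#include <cstddef>

// Returns true when promoting the page wholesale is likely cheaper than
// copying its live objects one by one.
bool SketchShouldMovePage(size_t live_bytes_on_page, size_t page_area_bytes,
                          double min_live_fraction) {
  return static_cast<double>(live_bytes_on_page) >=
         min_live_fraction * static_cast<double>(page_area_bytes);
}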
4536 
4537 int MinorMarkCompactCollector::CollectNewSpaceArrayBufferTrackerItems(
4538  ItemParallelJob* job) {
4539  int pages = 0;
4540  for (Page* p : new_space_evacuation_pages_) {
4541  if (Evacuator::ComputeEvacuationMode(p) == Evacuator::kObjectsNewToOld) {
4542  if (p->local_tracker() == nullptr) continue;
4543 
4544  pages++;
4545  job->AddItem(new ArrayBufferTrackerUpdatingItem(
4546  p, ArrayBufferTrackerUpdatingItem::kRegular));
4547  }
4548  }
4549  return pages;
4550 }
4551 
4552 #endif // ENABLE_MINOR_MC
4553 
4554 } // namespace internal
4555 } // namespace v8