V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
mark-compact.h
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #ifndef V8_HEAP_MARK_COMPACT_H_
6 #define V8_HEAP_MARK_COMPACT_H_
7 
8 #include <vector>
9 
10 #include "src/heap/concurrent-marking.h"
11 #include "src/heap/marking.h"
12 #include "src/heap/objects-visiting.h"
13 #include "src/heap/spaces.h"
14 #include "src/heap/sweeper.h"
15 #include "src/heap/worklist.h"
16 
17 namespace v8 {
18 namespace internal {
19 
20 // Forward declarations.
21 class EvacuationJobTraits;
22 class HeapObjectVisitor;
23 class ItemParallelJob;
24 class JSWeakCell;
25 class MigrationObserver;
26 class RecordMigratedSlotVisitor;
27 class UpdatingItem;
28 class YoungGenerationMarkingVisitor;
29 
30 template <typename ConcreteState, AccessMode access_mode>
31 class MarkingStateBase {
32  public:
33  V8_INLINE MarkBit MarkBitFrom(HeapObject* obj) {
34  return MarkBitFrom(MemoryChunk::FromAddress(obj->address()),
35  obj->address());
36  }
37 
38  V8_INLINE MarkBit MarkBitFrom(MemoryChunk* p, Address addr) {
39  return static_cast<ConcreteState*>(this)->bitmap(p)->MarkBitFromIndex(
40  p->AddressToMarkbitIndex(addr));
41  }
42 
43  Marking::ObjectColor Color(HeapObject* obj) {
44  return Marking::Color(MarkBitFrom(obj));
45  }
46 
47  V8_INLINE bool IsImpossible(HeapObject* obj) {
48  return Marking::IsImpossible<access_mode>(MarkBitFrom(obj));
49  }
50 
51  V8_INLINE bool IsBlack(HeapObject* obj) {
52  return Marking::IsBlack<access_mode>(MarkBitFrom(obj));
53  }
54 
55  V8_INLINE bool IsWhite(HeapObject* obj) {
56  return Marking::IsWhite<access_mode>(MarkBitFrom(obj));
57  }
58 
59  V8_INLINE bool IsGrey(HeapObject* obj) {
60  return Marking::IsGrey<access_mode>(MarkBitFrom(obj));
61  }
62 
63  V8_INLINE bool IsBlackOrGrey(HeapObject* obj) {
64  return Marking::IsBlackOrGrey<access_mode>(MarkBitFrom(obj));
65  }
66 
67  V8_INLINE bool WhiteToGrey(HeapObject* obj);
68  V8_INLINE bool WhiteToBlack(HeapObject* obj);
69  V8_INLINE bool GreyToBlack(HeapObject* obj);
70 
71  void ClearLiveness(MemoryChunk* chunk) {
72  static_cast<ConcreteState*>(this)->bitmap(chunk)->Clear();
73  static_cast<ConcreteState*>(this)->SetLiveBytes(chunk, 0);
74  }
75 };
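
MarkingStateBase is a CRTP base class: MarkBitFrom() and ClearLiveness() call bitmap() and SetLiveBytes() on the ConcreteState template parameter, so each marking state below (minor, incremental, major, atomic or not) supplies its own storage without virtual dispatch. A minimal standalone sketch of that pattern, with hypothetical names (StateBase, ExampleState) rather than the real V8 types:

#include <cstdio>

// Hypothetical analogue of MarkingStateBase: the base reaches the concrete
// state's data through a static_cast, so the call is resolved at compile time.
template <typename ConcreteState>
class StateBase {
 public:
  void ClearLiveness() {
    // Mirrors MarkingStateBase::ClearLiveness(): delegate to the subclass.
    static_cast<ConcreteState*>(this)->SetLiveBytes(0);
  }
};

class ExampleState : public StateBase<ExampleState> {
 public:
  void SetLiveBytes(long value) { live_bytes_ = value; }
  long live_bytes() const { return live_bytes_; }

 private:
  long live_bytes_ = 1024;
};

int main() {
  ExampleState state;
  state.ClearLiveness();                     // statically dispatched
  std::printf("%ld\n", state.live_bytes());  // prints 0
}
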
76 
77 class MarkBitCellIterator {
78  public:
79  MarkBitCellIterator(MemoryChunk* chunk, Bitmap* bitmap) : chunk_(chunk) {
80  last_cell_index_ =
81  Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(chunk_->area_end()));
82  cell_base_ = chunk_->address();
83  cell_index_ =
84  Bitmap::IndexToCell(chunk_->AddressToMarkbitIndex(cell_base_));
85  cells_ = bitmap->cells();
86  }
87 
88  inline bool Done() { return cell_index_ >= last_cell_index_; }
89 
90  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
91 
92  inline MarkBit::CellType* CurrentCell() {
93  DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
94  chunk_->AddressToMarkbitIndex(cell_base_))));
95  return &cells_[cell_index_];
96  }
97 
98  inline Address CurrentCellBase() {
99  DCHECK_EQ(cell_index_, Bitmap::IndexToCell(Bitmap::CellAlignIndex(
100  chunk_->AddressToMarkbitIndex(cell_base_))));
101  return cell_base_;
102  }
103 
104  V8_WARN_UNUSED_RESULT inline bool Advance() {
105  cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
106  return ++cell_index_ != last_cell_index_;
107  }
108 
109  inline bool Advance(unsigned int new_cell_index) {
110  if (new_cell_index != cell_index_) {
111  DCHECK_GT(new_cell_index, cell_index_);
112  DCHECK_LE(new_cell_index, last_cell_index_);
113  unsigned int diff = new_cell_index - cell_index_;
114  cell_index_ = new_cell_index;
115  cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
116  return true;
117  }
118  return false;
119  }
120 
121  // Returns the next mark bit cell. If there is no next cell, returns 0.
122  inline MarkBit::CellType PeekNext() {
123  if (HasNext()) {
124  return cells_[cell_index_ + 1];
125  }
126  return 0;
127  }
128 
129  private:
130  MemoryChunk* chunk_;
131  MarkBit::CellType* cells_;
132  unsigned int last_cell_index_;
133  unsigned int cell_index_;
134  Address cell_base_;
135 };
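
MarkBitCellIterator walks the marking bitmap one cell (a machine word of mark bits) at a time; note that Advance() both steps to the next cell and reports whether more cells remain. A standalone, simplified sketch of that traversal (a hypothetical CellIterator over plain uint32_t cells, not the V8 Bitmap):

#include <cstdint>
#include <cstdio>

// Hypothetical analogue of MarkBitCellIterator restricted to an array of
// 32-bit cells: Done()/CurrentCell()/Advance() follow the same protocol.
class CellIterator {
 public:
  CellIterator(const std::uint32_t* cells, unsigned last_cell_index)
      : cells_(cells), cell_index_(0), last_cell_index_(last_cell_index) {}

  bool Done() const { return cell_index_ >= last_cell_index_; }
  std::uint32_t CurrentCell() const { return cells_[cell_index_]; }
  bool Advance() { return ++cell_index_ != last_cell_index_; }

 private:
  const std::uint32_t* cells_;
  unsigned cell_index_;
  unsigned last_cell_index_;
};

int main() {
  const std::uint32_t bitmap[] = {0x0u, 0x5u, 0x80000000u};  // 0 + 2 + 1 bits
  int marked = 0;
  for (CellIterator it(bitmap, 3); !it.Done();) {
    // Count the set mark bits in the current cell.
    for (std::uint32_t cell = it.CurrentCell(); cell != 0; cell &= cell - 1) {
      ++marked;
    }
    if (!it.Advance()) break;
  }
  std::printf("marked bits: %d\n", marked);  // prints 3
}
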
136 
137 enum LiveObjectIterationMode {
138  kBlackObjects,
139  kGreyObjects,
140  kAllLiveObjects
141 };
142 
143 template <LiveObjectIterationMode mode>
144 class LiveObjectRange {
145  public:
146  class iterator {
147  public:
148  using value_type = std::pair<HeapObject*, int /* size */>;
149  using pointer = const value_type*;
150  using reference = const value_type&;
151  using iterator_category = std::forward_iterator_tag;
152 
153  inline iterator(MemoryChunk* chunk, Bitmap* bitmap, Address start);
154 
155  inline iterator& operator++();
156  inline iterator operator++(int);
157 
158  bool operator==(iterator other) const {
159  return current_object_ == other.current_object_;
160  }
161 
162  bool operator!=(iterator other) const { return !(*this == other); }
163 
164  value_type operator*() {
165  return std::make_pair(current_object_, current_size_);
166  }
167 
168  private:
169  inline void AdvanceToNextValidObject();
170 
171  MemoryChunk* const chunk_;
172  Map const one_word_filler_map_;
173  Map const two_word_filler_map_;
174  Map const free_space_map_;
175  MarkBitCellIterator it_;
176  Address cell_base_;
177  MarkBit::CellType current_cell_;
178  HeapObject* current_object_;
179  int current_size_;
180  };
181 
182  LiveObjectRange(MemoryChunk* chunk, Bitmap* bitmap)
183  : chunk_(chunk),
184  bitmap_(bitmap),
185  start_(chunk_->area_start()),
186  end_(chunk->area_end()) {}
187 
188  inline iterator begin();
189  inline iterator end();
190 
191  private:
192  MemoryChunk* const chunk_;
193  Bitmap* bitmap_;
194  Address start_;
195  Address end_;
196 };
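
LiveObjectRange is a forward-iterable range whose iterator yields (HeapObject*, size) pairs, so page walks can use range-based for. A hedged usage sketch based only on the interface above; |chunk| and |bitmap| are assumed to come from the surrounding heap code:

// for (auto object_and_size : LiveObjectRange<kBlackObjects>(chunk, bitmap)) {
//   HeapObject* object = object_and_size.first;
//   int size = object_and_size.second;
//   // ... process each black (marked) object on the page ...
// }
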
197 
198 class LiveObjectVisitor : AllStatic {
199  public:
200  enum IterationMode {
201  kKeepMarking,
202  kClearMarkbits,
203  };
204 
205  // Visits black objects on a MemoryChunk until the Visitor returns |false| for
206  // an object. If IterationMode::kClearMarkbits is passed the markbits and
207  // slots for visited objects are cleared for each successfully visited object.
208  template <class Visitor, typename MarkingState>
209  static bool VisitBlackObjects(MemoryChunk* chunk, MarkingState* state,
210  Visitor* visitor, IterationMode iteration_mode,
211  HeapObject** failed_object);
212 
213  // Visits black objects on a MemoryChunk. The visitor is not allowed to fail
214  // visitation for an object.
215  template <class Visitor, typename MarkingState>
216  static void VisitBlackObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
217  Visitor* visitor,
218  IterationMode iteration_mode);
219 
220  // Visits grey objects on a MemoryChunk. The visitor is not allowed to fail
221  // visitation for an object.
222  template <class Visitor, typename MarkingState>
223  static void VisitGreyObjectsNoFail(MemoryChunk* chunk, MarkingState* state,
224  Visitor* visitor,
225  IterationMode iteration_mode);
226 
227  template <typename MarkingState>
228  static void RecomputeLiveBytes(MemoryChunk* chunk, MarkingState* state);
229 };
230 
231 enum PageEvacuationMode { NEW_TO_NEW, NEW_TO_OLD };
232 enum MarkingTreatmentMode { KEEP, CLEAR };
233 enum class RememberedSetUpdatingMode { ALL, OLD_TO_NEW_ONLY };
234 
235 // Base class for minor and full MC collectors.
236 class MarkCompactCollectorBase {
237  public:
238  static const int kMainThread = 0;
239 
240  virtual ~MarkCompactCollectorBase() = default;
241 
242  virtual void SetUp() = 0;
243  virtual void TearDown() = 0;
244  virtual void CollectGarbage() = 0;
245 
246  inline Heap* heap() const { return heap_; }
247  inline Isolate* isolate();
248 
249  protected:
250  explicit MarkCompactCollectorBase(Heap* heap)
251  : heap_(heap), old_to_new_slots_(0) {}
252 
253  // Marking operations for objects reachable from roots.
254  virtual void MarkLiveObjects() = 0;
255  // Mark objects reachable (transitively) from objects in the marking
256  // work list.
257  virtual void ProcessMarkingWorklist() = 0;
258  // Clear non-live references held in side data structures.
259  virtual void ClearNonLiveReferences() = 0;
260  virtual void EvacuatePrologue() = 0;
261  virtual void EvacuateEpilogue() = 0;
262  virtual void Evacuate() = 0;
263  virtual void EvacuatePagesInParallel() = 0;
264  virtual void UpdatePointersAfterEvacuation() = 0;
265  virtual UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk,
266  Address start,
267  Address end) = 0;
268  virtual UpdatingItem* CreateRememberedSetUpdatingItem(
269  MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) = 0;
270 
271  template <class Evacuator, class Collector>
272  void CreateAndExecuteEvacuationTasks(
273  Collector* collector, ItemParallelJob* job,
274  RecordMigratedSlotVisitor* record_visitor,
275  MigrationObserver* migration_observer, const intptr_t live_bytes);
276 
277  // Returns whether this page should be moved according to heuristics.
278  bool ShouldMovePage(Page* p, intptr_t live_bytes);
279 
280  int CollectToSpaceUpdatingItems(ItemParallelJob* job);
281  template <typename IterateableSpace>
282  int CollectRememberedSetUpdatingItems(ItemParallelJob* job,
283  IterateableSpace* space,
284  RememberedSetUpdatingMode mode);
285 
286  int NumberOfParallelCompactionTasks(int pages);
287  int NumberOfParallelPointerUpdateTasks(int pages, int slots);
288  int NumberOfParallelToSpacePointerUpdateTasks(int pages);
289 
290  Heap* heap_;
291  // Number of old to new slots. Should be computed during MarkLiveObjects.
292  // -1 indicates that the value couldn't be computed.
293  int old_to_new_slots_;
294 };
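
MarkCompactCollectorBase only declares the GC phases as virtual hooks; the driver that sequences them lives in mark-compact.cc. The sketch below is an illustrative stand-in (hypothetical CollectorBase/LoggingCollector) showing the classic mark, clear-weak-references, evacuate order that the hook names suggest, not the actual V8 control flow:

#include <cstdio>

// Hypothetical base with pure-virtual phase hooks, driven in the classic
// mark-compact order for illustration only.
class CollectorBase {
 public:
  virtual ~CollectorBase() = default;
  void CollectGarbage() {
    MarkLiveObjects();
    ClearNonLiveReferences();
    Evacuate();
  }

 protected:
  virtual void MarkLiveObjects() = 0;
  virtual void ClearNonLiveReferences() = 0;
  virtual void Evacuate() = 0;
};

class LoggingCollector final : public CollectorBase {
 protected:
  void MarkLiveObjects() override { std::puts("mark live objects"); }
  void ClearNonLiveReferences() override { std::puts("clear weak references"); }
  void Evacuate() override { std::puts("evacuate and compact"); }
};

int main() {
  LoggingCollector collector;
  collector.CollectGarbage();  // prints the three phases in order
}
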
295 
296 class MinorMarkingState final
297  : public MarkingStateBase<MinorMarkingState, AccessMode::ATOMIC> {
298  public:
299  Bitmap* bitmap(const MemoryChunk* chunk) const {
300  return chunk->young_generation_bitmap_;
301  }
302 
303  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
304  chunk->young_generation_live_byte_count_ += by;
305  }
306 
307  intptr_t live_bytes(MemoryChunk* chunk) const {
308  return chunk->young_generation_live_byte_count_;
309  }
310 
311  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
312  chunk->young_generation_live_byte_count_ = value;
313  }
314 };
315 
316 class MinorNonAtomicMarkingState final
317  : public MarkingStateBase<MinorNonAtomicMarkingState,
318  AccessMode::NON_ATOMIC> {
319  public:
320  Bitmap* bitmap(const MemoryChunk* chunk) const {
321  return chunk->young_generation_bitmap_;
322  }
323 
324  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
325  chunk->young_generation_live_byte_count_ += by;
326  }
327 
328  intptr_t live_bytes(MemoryChunk* chunk) const {
329  return chunk->young_generation_live_byte_count_;
330  }
331 
332  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
333  chunk->young_generation_live_byte_count_ = value;
334  }
335 };
336 
337 // This marking state is used when concurrent marking is running.
338 class IncrementalMarkingState final
339  : public MarkingStateBase<IncrementalMarkingState, AccessMode::ATOMIC> {
340  public:
341  Bitmap* bitmap(const MemoryChunk* chunk) const {
342  DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
343  reinterpret_cast<intptr_t>(chunk),
344  MemoryChunk::kMarkBitmapOffset);
345  return chunk->marking_bitmap_;
346  }
347 
348  // Concurrent marking uses local live bytes.
349  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
350  chunk->live_byte_count_ += by;
351  }
352 
353  intptr_t live_bytes(MemoryChunk* chunk) const {
354  return chunk->live_byte_count_;
355  }
356 
357  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
358  chunk->live_byte_count_ = value;
359  }
360 };
361 
362 class MajorAtomicMarkingState final
363  : public MarkingStateBase<MajorAtomicMarkingState, AccessMode::ATOMIC> {
364  public:
365  Bitmap* bitmap(const MemoryChunk* chunk) const {
366  DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
367  reinterpret_cast<intptr_t>(chunk),
368  MemoryChunk::kMarkBitmapOffset);
369  return chunk->marking_bitmap_;
370  }
371 
372  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
373  chunk->live_byte_count_ += by;
374  }
375 
376  intptr_t live_bytes(MemoryChunk* chunk) const {
377  return chunk->live_byte_count_;
378  }
379 
380  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
381  chunk->live_byte_count_ = value;
382  }
383 };
384 
385 class MajorNonAtomicMarkingState final
386  : public MarkingStateBase<MajorNonAtomicMarkingState,
387  AccessMode::NON_ATOMIC> {
388  public:
389  Bitmap* bitmap(const MemoryChunk* chunk) const {
390  DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
391  reinterpret_cast<intptr_t>(chunk),
392  MemoryChunk::kMarkBitmapOffset);
393  return chunk->marking_bitmap_;
394  }
395 
396  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
397  chunk->live_byte_count_ += by;
398  }
399 
400  intptr_t live_bytes(MemoryChunk* chunk) const {
401  return chunk->live_byte_count_;
402  }
403 
404  void SetLiveBytes(MemoryChunk* chunk, intptr_t value) {
405  chunk->live_byte_count_ = value;
406  }
407 };
408 
409 struct Ephemeron {
410  HeapObject* key;
411  HeapObject* value;
412 };
413 
414 typedef Worklist<Ephemeron, 64> EphemeronWorklist;
415 
416 // Weak objects encountered during marking.
417 struct WeakObjects {
418  Worklist<TransitionArray*, 64> transition_arrays;
419 
420  // Keep track of all EphemeronHashTables in the heap to process
421  // them in the atomic pause.
422  Worklist<EphemeronHashTable, 64> ephemeron_hash_tables;
423 
424  // Keep track of all ephemerons for concurrent marking tasks. Only store
425  // ephemerons in these Worklists if both key and value are unreachable at the
426  // moment.
427  //
428  // MarkCompactCollector::ProcessEphemeronsUntilFixpoint drains and fills these
429  // worklists.
430  //
431  // current_ephemerons is used as draining worklist in the current fixpoint
432  // iteration.
433  EphemeronWorklist current_ephemerons;
434 
435  // Stores ephemerons to visit in the next fixpoint iteration.
436  EphemeronWorklist next_ephemerons;
437 
438  // When draining the marking worklist, newly discovered ephemerons are pushed
439  // into this worklist.
440  EphemeronWorklist discovered_ephemerons;
441 
442  // TODO(marja): For old space, we only need the slot, not the host
443  // object. Optimize this by adding a different storage for old space.
444  Worklist<std::pair<HeapObject*, HeapObjectSlot>, 64> weak_references;
445  Worklist<std::pair<HeapObject*, Code>, 64> weak_objects_in_code;
446 
447  Worklist<JSWeakCell*, 64> js_weak_cells;
448 };
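
The three ephemeron worklists exist because ephemeron marking needs a fixpoint: a value may only be marked once its key is marked, so unresolved ephemerons are deferred and retried. A standalone sketch of such a fixpoint loop with hypothetical std::string "objects"; the real ProcessEphemeronsUntilFixpoint in mark-compact.cc also drains the regular marking worklist between iterations:

#include <cstdio>
#include <deque>
#include <set>
#include <string>
#include <utility>

using Object = std::string;
using KeyValue = std::pair<Object, Object>;  // hypothetical {key, value} pair

// Mark values whose keys are reachable; defer the rest and retry until no
// further progress is made (the fixpoint).
void ProcessEphemeronsUntilFixpoint(std::set<Object>* marked,
                                    std::deque<KeyValue> current) {
  bool progress = true;
  while (progress) {
    progress = false;
    std::deque<KeyValue> next;
    while (!current.empty()) {
      KeyValue e = current.front();
      current.pop_front();
      if (marked->count(e.first)) {
        // The key is reachable, so the value becomes reachable too.
        progress |= marked->insert(e.second).second;
      } else {
        next.push_back(e);  // Key still unmarked: revisit next iteration.
      }
    }
    current = std::move(next);
  }
}

int main() {
  std::set<Object> marked = {"root"};
  // "b" only becomes live because "a" does, which depends on "root".
  ProcessEphemeronsUntilFixpoint(&marked, {{"a", "b"}, {"root", "a"}});
  std::printf("marked objects: %zu\n", marked.size());  // prints 3
}
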
449 
450 struct EphemeronMarking {
451  std::vector<HeapObject*> newly_discovered;
452  bool newly_discovered_overflowed;
453  size_t newly_discovered_limit;
454 };
455 
456 // Collector for young and old generation.
457 class MarkCompactCollector final : public MarkCompactCollectorBase {
458  public:
459 #ifdef V8_CONCURRENT_MARKING
460  using MarkingState = IncrementalMarkingState;
461 #else
462  using MarkingState = MajorNonAtomicMarkingState;
463 #endif // V8_CONCURRENT_MARKING
464 
465  using NonAtomicMarkingState = MajorNonAtomicMarkingState;
466 
467  // Wrapper for the shared and bailout worklists.
468  class MarkingWorklist {
469  public:
470  using ConcurrentMarkingWorklist = Worklist<HeapObject*, 64>;
471  using EmbedderTracingWorklist = Worklist<HeapObject*, 16>;
472 
473  // The heap parameter is not used but needed to match the sequential case.
474  explicit MarkingWorklist(Heap* heap) {}
475 
476  void Push(HeapObject* object) {
477  bool success = shared_.Push(kMainThread, object);
478  USE(success);
479  DCHECK(success);
480  }
481 
482  void PushBailout(HeapObject* object) {
483  bool success = bailout_.Push(kMainThread, object);
484  USE(success);
485  DCHECK(success);
486  }
487 
488  HeapObject* Pop() {
489  HeapObject* result;
490 #ifdef V8_CONCURRENT_MARKING
491  if (bailout_.Pop(kMainThread, &result)) return result;
492 #endif
493  if (shared_.Pop(kMainThread, &result)) return result;
494 #ifdef V8_CONCURRENT_MARKING
495  // The expectation is that this work list is empty almost all the time
496  // and we can thus avoid the emptiness checks by putting it last.
497  if (on_hold_.Pop(kMainThread, &result)) return result;
498 #endif
499  return nullptr;
500  }
501 
502  HeapObject* PopBailout() {
503 #ifdef V8_CONCURRENT_MARKING
504  HeapObject* result;
505  if (bailout_.Pop(kMainThread, &result)) return result;
506 #endif
507  return nullptr;
508  }
509 
510  void Clear() {
511  bailout_.Clear();
512  shared_.Clear();
513  on_hold_.Clear();
514  embedder_.Clear();
515  }
516 
517  bool IsBailoutEmpty() { return bailout_.IsLocalEmpty(kMainThread); }
518 
519  bool IsEmpty() {
520  return bailout_.IsLocalEmpty(kMainThread) &&
521  shared_.IsLocalEmpty(kMainThread) &&
522  on_hold_.IsLocalEmpty(kMainThread) &&
523  bailout_.IsGlobalPoolEmpty() && shared_.IsGlobalPoolEmpty() &&
524  on_hold_.IsGlobalPoolEmpty();
525  }
526 
527  bool IsEmbedderEmpty() {
528  return embedder_.IsLocalEmpty(kMainThread) &&
529  embedder_.IsGlobalPoolEmpty();
530  }
531 
532  int Size() {
533  return static_cast<int>(bailout_.LocalSize(kMainThread) +
534  shared_.LocalSize(kMainThread) +
535  on_hold_.LocalSize(kMainThread));
536  }
537 
538  // Calls the specified callback on each element of the deques and replaces
539  // the element with the result of the callback. If the callback returns
540  // nullptr then the element is removed from the deque.
541  // The callback must accept HeapObject* and return HeapObject*.
542  template <typename Callback>
543  void Update(Callback callback) {
544  bailout_.Update(callback);
545  shared_.Update(callback);
546  on_hold_.Update(callback);
547  embedder_.Update(callback);
548  }
549 
550  ConcurrentMarkingWorklist* shared() { return &shared_; }
551  ConcurrentMarkingWorklist* bailout() { return &bailout_; }
552  ConcurrentMarkingWorklist* on_hold() { return &on_hold_; }
553  EmbedderTracingWorklist* embedder() { return &embedder_; }
554 
555  void Print() {
556  PrintWorklist("shared", &shared_);
557  PrintWorklist("bailout", &bailout_);
558  PrintWorklist("on_hold", &on_hold_);
559  }
560 
561  private:
562  // Prints the stats about the global pool of the worklist.
563  void PrintWorklist(const char* worklist_name,
564  ConcurrentMarkingWorklist* worklist);
565 
566  // Worklist used for most objects.
567  ConcurrentMarkingWorklist shared_;
568 
569  // Concurrent marking uses this worklist to bail out of concurrently
570  // marking certain object types. These objects are handled later in a STW
571  // pause after concurrent marking has finished.
572  ConcurrentMarkingWorklist bailout_;
573 
574  // Concurrent marking uses this worklist to bail out of marking objects
575  // in new space's linear allocation area. Used to avoid black allocation
576  // for new space. This allows the compiler to remove write barriers
577  // for freshly allocated objects.
578  ConcurrentMarkingWorklist on_hold_;
579 
580  // Worklist for objects that potentially require embedder tracing, i.e.,
581  // these objects need to be handed over to the embedder to find the full
582  // transitive closure.
583  EmbedderTracingWorklist embedder_;
584  };
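
Pop() above drains the bailout worklist first, then the shared worklist, and only then the rarely non-empty on-hold worklist. A standalone toy analogue of that priority order, using plain int "objects" and std::vector stacks in place of the segmented Worklist:

#include <cstdio>
#include <vector>

// Hypothetical stand-in for MarkingWorklist::Pop(): try bailout, then shared,
// then on_hold, returning false once all three are empty.
struct TinyWorklist {
  std::vector<int> bailout, shared, on_hold;

  static bool PopFrom(std::vector<int>* list, int* out) {
    if (list->empty()) return false;
    *out = list->back();
    list->pop_back();
    return true;
  }

  bool Pop(int* out) {
    return PopFrom(&bailout, out) || PopFrom(&shared, out) ||
           PopFrom(&on_hold, out);
  }
};

int main() {
  TinyWorklist worklist{{1}, {2, 3}, {4}};
  for (int object; worklist.Pop(&object);) {
    std::printf("%d ", object);  // prints "1 3 2 4 "
  }
  std::printf("\n");
}
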
585 
586  class RootMarkingVisitor;
587  class CustomRootBodyMarkingVisitor;
588 
589  enum IterationMode {
590  kKeepMarking,
591  kClearMarkbits,
592  };
593 
594  MarkingState* marking_state() { return &marking_state_; }
595 
596  NonAtomicMarkingState* non_atomic_marking_state() {
597  return &non_atomic_marking_state_;
598  }
599 
600  void SetUp() override;
601  void TearDown() override;
602  // Performs a global garbage collection.
603  void CollectGarbage() override;
604 
605  void CollectEvacuationCandidates(PagedSpace* space);
606 
607  void AddEvacuationCandidate(Page* p);
608 
609  // Prepares for GC by resetting relocation info in old and map spaces and
610  // choosing spaces to compact.
611  void Prepare();
612 
613  // Stop concurrent marking (either by preempting it right away or waiting for
614  // it to complete as requested by |stop_request|).
615  void FinishConcurrentMarking(ConcurrentMarking::StopRequest stop_request);
616 
617  bool StartCompaction();
618 
619  void AbortCompaction();
620 
621  static inline bool IsOnEvacuationCandidate(Object* obj) {
622  return Page::FromAddress(reinterpret_cast<Address>(obj))
623  ->IsEvacuationCandidate();
624  }
625 
626  static bool IsOnEvacuationCandidate(MaybeObject obj);
627 
628  void RecordRelocSlot(Code host, RelocInfo* rinfo, Object* target);
629  V8_INLINE static void RecordSlot(HeapObject* object, ObjectSlot slot,
630  HeapObject* target);
631  V8_INLINE static void RecordSlot(HeapObject* object, HeapObjectSlot slot,
632  HeapObject* target);
633  void RecordLiveSlotsOnPage(Page* page);
634 
635  void UpdateSlots(SlotsBuffer* buffer);
636  void UpdateSlotsRecordedIn(SlotsBuffer* buffer);
637 
638  bool is_compacting() const { return compacting_; }
639 
640  // Ensures that sweeping is finished.
641  //
642  // Note: Can only be called safely from main thread.
643  void EnsureSweepingCompleted();
644 
645  // Checks if sweeping is in progress right now on any space.
646  bool sweeping_in_progress() const { return sweeper_->sweeping_in_progress(); }
647 
648  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
649 
650  bool evacuation() const { return evacuation_; }
651 
652  MarkingWorklist* marking_worklist() { return &marking_worklist_; }
653 
654  WeakObjects* weak_objects() { return &weak_objects_; }
655 
656  void AddTransitionArray(TransitionArray* array) {
657  weak_objects_.transition_arrays.Push(kMainThread, array);
658  }
659 
660  void AddEphemeronHashTable(EphemeronHashTable table) {
661  weak_objects_.ephemeron_hash_tables.Push(kMainThread, table);
662  }
663 
664  void AddEphemeron(HeapObject* key, HeapObject* value) {
665  weak_objects_.discovered_ephemerons.Push(kMainThread,
666  Ephemeron{key, value});
667  }
668 
669  void AddWeakReference(HeapObject* host, HeapObjectSlot slot) {
670  weak_objects_.weak_references.Push(kMainThread, std::make_pair(host, slot));
671  }
672 
673  void AddWeakObjectInCode(HeapObject* object, Code code) {
674  weak_objects_.weak_objects_in_code.Push(kMainThread,
675  std::make_pair(object, code));
676  }
677 
678  void AddWeakCell(JSWeakCell* weak_cell) {
679  weak_objects_.js_weak_cells.Push(kMainThread, weak_cell);
680  }
681 
682  void AddNewlyDiscovered(HeapObject* object) {
683  if (ephemeron_marking_.newly_discovered_overflowed) return;
684 
685  if (ephemeron_marking_.newly_discovered.size() <
686  ephemeron_marking_.newly_discovered_limit) {
687  ephemeron_marking_.newly_discovered.push_back(object);
688  } else {
689  ephemeron_marking_.newly_discovered_overflowed = true;
690  }
691  }
692 
693  void ResetNewlyDiscovered() {
694  ephemeron_marking_.newly_discovered_overflowed = false;
695  ephemeron_marking_.newly_discovered.clear();
696  }
697 
698  Sweeper* sweeper() { return sweeper_; }
699 
700 #ifdef DEBUG
701  // Checks whether a mark-compact collection is in progress.
702  bool in_use() { return state_ > PREPARE_GC; }
703  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
704 #endif
705 
706  void VerifyMarking();
707 #ifdef VERIFY_HEAP
708  void VerifyValidStoreAndSlotsBufferEntries();
709  void VerifyMarkbitsAreClean();
710  void VerifyMarkbitsAreDirty(PagedSpace* space);
711  void VerifyMarkbitsAreClean(PagedSpace* space);
712  void VerifyMarkbitsAreClean(NewSpace* space);
713  void VerifyMarkbitsAreClean(LargeObjectSpace* space);
714 #endif
715 
716  private:
717  explicit MarkCompactCollector(Heap* heap);
718  ~MarkCompactCollector() override;
719 
720  void ComputeEvacuationHeuristics(size_t area_size,
721  int* target_fragmentation_percent,
722  size_t* max_evacuated_bytes);
723 
724  void RecordObjectStats();
725 
726  // Finishes GC, performs heap verification if enabled.
727  void Finish();
728 
729  void MarkLiveObjects() override;
730 
731  // Marks the object black and adds it to the marking work list.
732  // This is for non-incremental marking only.
733  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
734 
735  // Marks the object black and adds it to the marking work list.
736  // This is for non-incremental marking only.
737  V8_INLINE void MarkRootObject(Root root, HeapObject* obj);
738 
739  // Used by wrapper tracing.
740  V8_INLINE void MarkExternallyReferencedObject(HeapObject* obj);
741 
742  // Mark the heap roots and all objects reachable from them.
743  void MarkRoots(RootVisitor* root_visitor,
744  ObjectVisitor* custom_root_body_visitor);
745 
746  // Mark the string table specially. References to internalized strings from
747  // the string table are weak.
748  void MarkStringTable(ObjectVisitor* visitor);
749 
750  // Marks object reachable from harmony weak maps and wrapper tracing.
751  void ProcessEphemeronMarking();
752 
753  // If the call-site of the top optimized code was not prepared for
754  // deoptimization, then treat embedded pointers in the code as strong, as
755  // otherwise they can die and try to deoptimize the underlying code.
756  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
757 
758  // Collects a list of dependent code from maps embedded in optimized code.
759  DependentCode* DependentCodeListFromNonLiveMaps();
760 
761  // Drains the main thread marking work list. Will mark all pending objects
762  // if no concurrent threads are running.
763  void ProcessMarkingWorklist() override;
764 
765  enum class MarkingWorklistProcessingMode {
766  kDefault,
767  kTrackNewlyDiscoveredObjects
768  };
769 
770  template <MarkingWorklistProcessingMode mode>
771  void ProcessMarkingWorklistInternal();
772 
773  // Implements ephemeron semantics: Marks value if key is already reachable.
774  // Returns true if value was actually marked.
775  bool VisitEphemeron(HeapObject* key, HeapObject* value);
776 
777  // Marks ephemerons and drains marking worklist iteratively
778  // until a fixpoint is reached.
779  void ProcessEphemeronsUntilFixpoint();
780 
781  // Drains ephemeron and marking worklists. Single iteration of the
782  // fixpoint iteration.
783  bool ProcessEphemerons();
784 
785  // Mark ephemerons and drain marking worklist with a linear algorithm.
786  // Only used if fixpoint iteration doesn't finish within a few iterations.
787  void ProcessEphemeronsLinear();
788 
789  // Perform Wrapper Tracing if in use.
790  void PerformWrapperTracing();
791 
792  // Callback function for telling whether the object *p is an unmarked
793  // heap object.
794  static bool IsUnmarkedHeapObject(Heap* heap, ObjectSlot p);
795 
796  // Clear non-live references in weak cells, transition and descriptor arrays,
797  // and deoptimize dependent code of non-live maps.
798  void ClearNonLiveReferences() override;
799  void MarkDependentCodeForDeoptimization();
800  // Checks if the given weak cell is a simple transition from the parent map
801  // of the given dead target. If so it clears the transition and trims
802  // the descriptor array of the parent if needed.
803  void ClearPotentialSimpleMapTransition(Map dead_target);
804  void ClearPotentialSimpleMapTransition(Map map, Map dead_target);
805  // Compact every array in the global list of transition arrays and
806  // trim the corresponding descriptor array if a transition target is non-live.
807  void ClearFullMapTransitions();
808  bool CompactTransitionArray(Map map, TransitionArray* transitions,
809  DescriptorArray* descriptors);
810  void TrimDescriptorArray(Map map, DescriptorArray* descriptors);
811  void TrimEnumCache(Map map, DescriptorArray* descriptors);
812 
813  // After all reachable objects have been marked those weak map entries
814  // with an unreachable key are removed from all encountered weak maps.
815  // The linked list of all encountered weak maps is destroyed.
816  void ClearWeakCollections();
817 
818  // Goes through the list of encountered weak references and clears those with
819  // dead values. If the value is a dead map and the parent map transitions to
820  // the dead map via weak cell, then this function also clears the map
821  // transition.
822  void ClearWeakReferences();
823 
824  // Goes through the list of encountered JSWeakCells and clears those with dead
825  // values.
826  void ClearJSWeakCells();
827 
828  void AbortWeakObjects();
829 
830  // Starts sweeping of spaces by contributing on the main thread and setting
831  // up other pages for sweeping. Does not start sweeper tasks.
832  void StartSweepSpaces();
833  void StartSweepSpace(PagedSpace* space);
834 
835  void EvacuatePrologue() override;
836  void EvacuateEpilogue() override;
837  void Evacuate() override;
838  void EvacuatePagesInParallel() override;
839  void UpdatePointersAfterEvacuation() override;
840 
841  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
842  Address end) override;
843  UpdatingItem* CreateRememberedSetUpdatingItem(
844  MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
845 
846  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
847  int CollectOldSpaceArrayBufferTrackerItems(ItemParallelJob* job);
848 
849  void ReleaseEvacuationCandidates();
850  void PostProcessEvacuationCandidates();
851  void ReportAbortedEvacuationCandidate(HeapObject* failed_object,
852  MemoryChunk* chunk);
853 
854  static const int kEphemeronChunkSize = 8 * KB;
855 
856  int NumberOfParallelEphemeronVisitingTasks(size_t elements);
857 
858  void RightTrimDescriptorArray(DescriptorArray* array,
859  int descriptors_to_trim);
860 
861  base::Mutex mutex_;
862  base::Semaphore page_parallel_job_semaphore_;
863 
864 #ifdef DEBUG
865  enum CollectorState {
866  IDLE,
867  PREPARE_GC,
868  MARK_LIVE_OBJECTS,
869  SWEEP_SPACES,
870  ENCODE_FORWARDING_ADDRESSES,
871  UPDATE_POINTERS,
872  RELOCATE_OBJECTS
873  };
874 
875  // The current stage of the collector.
876  CollectorState state_;
877 #endif
878 
879  bool was_marked_incrementally_;
880 
881  bool evacuation_;
882 
883  // True if we are collecting slots to perform evacuation from evacuation
884  // candidates.
885  bool compacting_;
886 
887  bool black_allocation_;
888 
889  bool have_code_to_deoptimize_;
890 
891  MarkingWorklist marking_worklist_;
892  WeakObjects weak_objects_;
893  EphemeronMarking ephemeron_marking_;
894 
895  // Candidates for pages that should be evacuated.
896  std::vector<Page*> evacuation_candidates_;
897  // Pages that are actually processed during evacuation.
898  std::vector<Page*> old_space_evacuation_pages_;
899  std::vector<Page*> new_space_evacuation_pages_;
900  std::vector<std::pair<HeapObject*, Page*>> aborted_evacuation_candidates_;
901 
902  Sweeper* sweeper_;
903 
904  MarkingState marking_state_;
905  NonAtomicMarkingState non_atomic_marking_state_;
906 
907  friend class EphemeronHashTableMarkingTask;
908  friend class FullEvacuator;
909  friend class Heap;
910  friend class RecordMigratedSlotVisitor;
911 };
912 
913 template <FixedArrayVisitationMode fixed_array_mode,
914  TraceRetainingPathMode retaining_path_mode, typename MarkingState>
915 class MarkingVisitor final
916  : public HeapVisitor<
917  int,
918  MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>> {
919  public:
920  typedef HeapVisitor<
921  int, MarkingVisitor<fixed_array_mode, retaining_path_mode, MarkingState>>
922  Parent;
923 
924  V8_INLINE MarkingVisitor(MarkCompactCollector* collector,
925  MarkingState* marking_state);
926 
927  V8_INLINE bool ShouldVisitMapPointer() { return false; }
928 
929  V8_INLINE int VisitBytecodeArray(Map map, BytecodeArray object);
930  V8_INLINE int VisitEphemeronHashTable(Map map, EphemeronHashTable object);
931  V8_INLINE int VisitFixedArray(Map map, FixedArray object);
932  V8_INLINE int VisitJSApiObject(Map map, JSObject* object);
933  V8_INLINE int VisitJSArrayBuffer(Map map, JSArrayBuffer* object);
934  V8_INLINE int VisitJSDataView(Map map, JSDataView* object);
935  V8_INLINE int VisitJSTypedArray(Map map, JSTypedArray* object);
936  V8_INLINE int VisitMap(Map map, Map object);
937  V8_INLINE int VisitTransitionArray(Map map, TransitionArray* object);
938  V8_INLINE int VisitJSWeakCell(Map map, JSWeakCell* object);
939 
940  // ObjectVisitor implementation.
941  V8_INLINE void VisitPointer(HeapObject* host, ObjectSlot p) final;
942  V8_INLINE void VisitPointer(HeapObject* host, MaybeObjectSlot p) final;
943  V8_INLINE void VisitPointers(HeapObject* host, ObjectSlot start,
944  ObjectSlot end) final;
945  V8_INLINE void VisitPointers(HeapObject* host, MaybeObjectSlot start,
946  MaybeObjectSlot end) final;
947  V8_INLINE void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) final;
948  V8_INLINE void VisitCodeTarget(Code host, RelocInfo* rinfo) final;
949 
950  // Weak list pointers should be ignored during marking. The lists are
951  // reconstructed after GC.
952  void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
953  ObjectSlot end) final {}
954 
955  private:
956  // Granularity in which FixedArrays are scanned if |fixed_array_mode|
957  // is true.
958  static const int kProgressBarScanningChunk = 32 * 1024;
959 
960  V8_INLINE int VisitFixedArrayIncremental(Map map, FixedArray object);
961 
962  template <typename T>
963  V8_INLINE int VisitEmbedderTracingSubclass(Map map, T* object);
964 
965  V8_INLINE void MarkMapContents(Map map);
966 
967  // Marks the object black without pushing it on the marking work list. Returns
968  // true if the object needed marking and false otherwise.
969  V8_INLINE bool MarkObjectWithoutPush(HeapObject* host, HeapObject* object);
970 
971  // Marks the object grey and pushes it on the marking work list.
972  V8_INLINE void MarkObject(HeapObject* host, HeapObject* obj);
973 
974  MarkingState* marking_state() { return marking_state_; }
975 
976  MarkCompactCollector::MarkingWorklist* marking_worklist() const {
977  return collector_->marking_worklist();
978  }
979 
980  Heap* const heap_;
981  MarkCompactCollector* const collector_;
982  MarkingState* const marking_state_;
983 };
984 
985 class EvacuationScope {
986  public:
987  explicit EvacuationScope(MarkCompactCollector* collector)
988  : collector_(collector) {
989  collector_->set_evacuation(true);
990  }
991 
992  ~EvacuationScope() { collector_->set_evacuation(false); }
993 
994  private:
995  MarkCompactCollector* collector_;
996 };
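
EvacuationScope is a plain RAII toggle: constructing it sets the collector's evacuation flag and the destructor clears it, so the flag cannot stay set after an early return. A minimal standalone analogue using a bool in place of the collector:

#include <cassert>

// Hypothetical RAII guard mirroring EvacuationScope: set a flag for the
// lifetime of the scope and always reset it in the destructor.
struct FlagScope {
  explicit FlagScope(bool* flag) : flag_(flag) { *flag_ = true; }
  ~FlagScope() { *flag_ = false; }
  bool* flag_;
};

int main() {
  bool evacuation = false;
  {
    FlagScope scope(&evacuation);
    assert(evacuation);  // "evacuation" is active inside the scope
  }
  assert(!evacuation);   // automatically reset when the scope ends
  return 0;
}
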
997 
998 #ifdef ENABLE_MINOR_MC
999 
1000 // Collector for young-generation only.
1001 class MinorMarkCompactCollector final : public MarkCompactCollectorBase {
1002  public:
1003  using MarkingState = MinorMarkingState;
1004  using NonAtomicMarkingState = MinorNonAtomicMarkingState;
1005 
1006  explicit MinorMarkCompactCollector(Heap* heap);
1007  ~MinorMarkCompactCollector() override;
1008 
1009  MarkingState* marking_state() { return &marking_state_; }
1010 
1011  NonAtomicMarkingState* non_atomic_marking_state() {
1012  return &non_atomic_marking_state_;
1013  }
1014 
1015  void SetUp() override;
1016  void TearDown() override;
1017  void CollectGarbage() override;
1018 
1019  void MakeIterable(Page* page, MarkingTreatmentMode marking_mode,
1020  FreeSpaceTreatmentMode free_space_mode);
1021  void CleanupSweepToIteratePages();
1022 
1023  private:
1024  using MarkingWorklist = Worklist<HeapObject*, 64 /* segment size */>;
1025  class RootMarkingVisitor;
1026 
1027  static const int kNumMarkers = 8;
1028  static const int kMainMarker = 0;
1029 
1030  inline MarkingWorklist* worklist() { return worklist_; }
1031 
1032  inline YoungGenerationMarkingVisitor* main_marking_visitor() {
1033  return main_marking_visitor_;
1034  }
1035 
1036  void MarkLiveObjects() override;
1037  void MarkRootSetInParallel(RootMarkingVisitor* root_visitor);
1038  V8_INLINE void MarkRootObject(HeapObject* obj);
1039  void ProcessMarkingWorklist() override;
1040  void ClearNonLiveReferences() override;
1041 
1042  void EvacuatePrologue() override;
1043  void EvacuateEpilogue() override;
1044  void Evacuate() override;
1045  void EvacuatePagesInParallel() override;
1046  void UpdatePointersAfterEvacuation() override;
1047 
1048  UpdatingItem* CreateToSpaceUpdatingItem(MemoryChunk* chunk, Address start,
1049  Address end) override;
1050  UpdatingItem* CreateRememberedSetUpdatingItem(
1051  MemoryChunk* chunk, RememberedSetUpdatingMode updating_mode) override;
1052 
1053  int CollectNewSpaceArrayBufferTrackerItems(ItemParallelJob* job);
1054 
1055  int NumberOfParallelMarkingTasks(int pages);
1056 
1057  MarkingWorklist* worklist_;
1058 
1059  YoungGenerationMarkingVisitor* main_marking_visitor_;
1060  base::Semaphore page_parallel_job_semaphore_;
1061  std::vector<Page*> new_space_evacuation_pages_;
1062  std::vector<Page*> sweep_to_iterate_pages_;
1063 
1064  MarkingState marking_state_;
1065  NonAtomicMarkingState non_atomic_marking_state_;
1066 
1067  friend class YoungGenerationMarkingTask;
1068  friend class YoungGenerationMarkingVisitor;
1069 };
1070 
1071 #endif // ENABLE_MINOR_MC
1072 
1073 } // namespace internal
1074 } // namespace v8
1075 
1076 #endif // V8_HEAP_MARK_COMPACT_H_