V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
scavenger.cc
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/scavenger.h"

#include "src/heap/array-buffer-collector.h"
#include "src/heap/barrier.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/scavenger-inl.h"
#include "src/heap/sweeper.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/utils-inl.h"

namespace v8 {
namespace internal {

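// Editorial note: a PageScavengingItem wraps a single MemoryChunk so that
// pages carrying OLD_TO_NEW slots can be handed out as work items of the
// parallel scavenge job created in ScavengerCollector::CollectGarbage();
// Process() simply forwards to Scavenger::ScavengePage().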
class PageScavengingItem final : public ItemParallelJob::Item {
 public:
  explicit PageScavengingItem(MemoryChunk* chunk) : chunk_(chunk) {}
  ~PageScavengingItem() override = default;

  void Process(Scavenger* scavenger) { scavenger->ScavengePage(chunk_); }

 private:
  MemoryChunk* const chunk_;
};

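// Editorial note: each ScavengingTask runs on a worker thread (and one on the
// main thread). It first drains the page items, scanning their remembered
// sets, then keeps calling Scavenger::Process() until the one-shot barrier
// reports that all tasks have run out of work, and finishes with one more
// Process() pass to pick up anything published after the last wait.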
class ScavengingTask final : public ItemParallelJob::Task {
 public:
  ScavengingTask(Heap* heap, Scavenger* scavenger, OneshotBarrier* barrier)
      : ItemParallelJob::Task(heap->isolate()),
        heap_(heap),
        scavenger_(scavenger),
        barrier_(barrier) {}

  void RunInParallel() final {
    TRACE_BACKGROUND_GC(
        heap_->tracer(),
        GCTracer::BackgroundScope::SCAVENGER_BACKGROUND_SCAVENGE_PARALLEL);
    double scavenging_time = 0.0;
    {
      barrier_->Start();
      TimedScope scope(&scavenging_time);
      PageScavengingItem* item = nullptr;
      while ((item = GetItem<PageScavengingItem>()) != nullptr) {
        item->Process(scavenger_);
        item->MarkFinished();
      }
      do {
        scavenger_->Process(barrier_);
      } while (!barrier_->Wait());
      scavenger_->Process();
    }
    if (FLAG_trace_parallel_scavenge) {
      PrintIsolate(heap_->isolate(),
                   "scavenge[%p]: time=%.2f copied=%zu promoted=%zu\n",
                   static_cast<void*>(this), scavenging_time,
                   scavenger_->bytes_copied(), scavenger_->bytes_promoted());
    }
  }

 private:
  Heap* const heap_;
  Scavenger* const scavenger_;
  OneshotBarrier* const barrier_;
};

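// Editorial note: this visitor scans the pointer fields of objects that were
// promoted to the old generation during this scavenge. Weak references are
// treated as strong. Targets still in from-space are scavenged on the spot,
// and slots that keep pointing into new space are inserted into the
// OLD_TO_NEW remembered set; when incremental compaction is running, slots
// pointing at evacuation candidates are recorded for the mark-compactor.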
class IterateAndScavengePromotedObjectsVisitor final : public ObjectVisitor {
 public:
  IterateAndScavengePromotedObjectsVisitor(Heap* heap, Scavenger* scavenger,
                                           bool record_slots)
      : heap_(heap), scavenger_(scavenger), record_slots_(record_slots) {}

  inline void VisitPointers(HeapObject* host, ObjectSlot start,
                            ObjectSlot end) final {
    for (ObjectSlot slot = start; slot < end; ++slot) {
      Object* target = *slot;
      DCHECK(!HasWeakHeapObjectTag(target));
      if (target->IsHeapObject()) {
        HandleSlot(host, HeapObjectSlot(slot), HeapObject::cast(target));
      }
    }
  }

  inline void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                            MaybeObjectSlot end) final {
    // Treat weak references as strong. TODO(marja): Proper weakness handling
    // in the young generation.
    for (MaybeObjectSlot slot = start; slot < end; ++slot) {
      MaybeObject target = *slot;
      HeapObject* heap_object;
      if (target->GetHeapObject(&heap_object)) {
        HandleSlot(host, HeapObjectSlot(slot), heap_object);
      }
    }
  }

  inline void HandleSlot(HeapObject* host, HeapObjectSlot slot,
                         HeapObject* target) {
    scavenger_->PageMemoryFence(MaybeObject::FromObject(target));

    if (Heap::InFromSpace(target)) {
      SlotCallbackResult result = scavenger_->ScavengeObject(slot, target);
      bool success = (*slot)->GetHeapObject(&target);
      USE(success);
      DCHECK(success);

      if (result == KEEP_SLOT) {
        SLOW_DCHECK(target->IsHeapObject());
        RememberedSet<OLD_TO_NEW>::Insert(Page::FromAddress(slot.address()),
                                          slot.address());
      }
      SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(
          HeapObject::cast(target)));
    } else if (record_slots_ && MarkCompactCollector::IsOnEvacuationCandidate(
                                    HeapObject::cast(target))) {
      heap_->mark_compact_collector()->RecordSlot(host, ObjectSlot(slot),
                                                  target);
    }
  }

 private:
  Heap* const heap_;
  Scavenger* const scavenger_;
  const bool record_slots_;
};

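// Editorial note: an object in from-space whose map word has not been
// replaced by a forwarding address was neither copied nor promoted, i.e. it
// did not survive this scavenge. Used below to decide which weak global
// handles to clear.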
static bool IsUnscavengedHeapObject(Heap* heap, ObjectSlot p) {
  return Heap::InFromSpace(*p) &&
         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
}

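// Editorial note: retainer used by Heap::ProcessYoungWeakReferences() below.
// Survivors are replaced by their forwarding address; dead new-space objects
// are dropped (nullptr).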
class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
 public:
  Object* RetainAs(Object* object) override {
    if (!Heap::InFromSpace(object)) {
      return object;
    }

    MapWord map_word = HeapObject::cast(object)->map_word();
    if (map_word.IsForwardingAddress()) {
      return map_word.ToForwardingAddress();
    }
    return nullptr;
  }
};

ScavengerCollector::ScavengerCollector(Heap* heap)
    : isolate_(heap->isolate()), heap_(heap), parallel_scavenge_semaphore_(0) {}

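// Editorial note: entry point for a scavenge (minor GC). The main phases, in
// order:
//  1. Create one Scavenger per task plus a PageScavengingItem for every
//     memory chunk that has OLD_TO_NEW slots (with the concurrent sweeper
//     paused and unswept pages temporarily filtered out).
//  2. Identify weak unmodified global handles, scavenge the roots, and run
//     the parallel job until the copied and promotion worklists are empty.
//  3. Process weak global handles, finalize the per-task scavengers, and
//     promote surviving new-space large objects.
//  4. Fix up external string table entries and marking worklists, process
//     young weak references, set the new-space age mark, free dead array
//     buffers and new large objects, and release empty remembered-set
//     buckets.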
void ScavengerCollector::CollectGarbage() {
  DCHECK(surviving_new_large_objects_.empty());
  ItemParallelJob job(isolate_->cancelable_task_manager(),
                      &parallel_scavenge_semaphore_);
  const int kMainThreadId = 0;
  Scavenger* scavengers[kMaxScavengerTasks];
  const bool is_logging = isolate_->LogObjectRelocation();
  const int num_scavenge_tasks = NumberOfScavengeTasks();
  OneshotBarrier barrier(base::TimeDelta::FromMilliseconds(kMaxWaitTimeMs));
  Scavenger::CopiedList copied_list(num_scavenge_tasks);
  Scavenger::PromotionList promotion_list(num_scavenge_tasks);
  for (int i = 0; i < num_scavenge_tasks; i++) {
    scavengers[i] = new Scavenger(this, heap_, is_logging, &copied_list,
                                  &promotion_list, i);
    job.AddTask(new ScavengingTask(heap_, scavengers[i], &barrier));
  }

  {
    Sweeper* sweeper = heap_->mark_compact_collector()->sweeper();
    // Pause the concurrent sweeper.
    Sweeper::PauseOrCompleteScope pause_scope(sweeper);
    // Filter out pages from the sweeper that need to be processed for old to
    // new slots by the Scavenger. After processing, the Scavenger adds back
    // pages that are still unswept. This way the Scavenger has exclusive
    // access to the slots of a page and can completely avoid any locks on
    // the page itself.
    Sweeper::FilterSweepingPagesScope filter_scope(sweeper, pause_scope);
    filter_scope.FilterOldSpaceSweepingPages(
        [](Page* page) { return !page->ContainsSlots<OLD_TO_NEW>(); });
    RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(
        heap_, [&job](MemoryChunk* chunk) {
          job.AddItem(new PageScavengingItem(chunk));
        });

    RootScavengeVisitor root_scavenge_visitor(scavengers[kMainThreadId]);

    {
      // Identify weak unmodified handles. Requires an unmodified graph.
      TRACE_GC(
          heap_->tracer(),
          GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_IDENTIFY);
      isolate_->global_handles()->IdentifyWeakUnmodifiedObjects(
          &JSObject::IsUnmodifiedApiObject);
    }
    {
      // Copy roots.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_ROOTS);
      heap_->IterateRoots(&root_scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
    }
    {
      // Parallel phase scavenging all copied and promoted objects.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_PARALLEL);
      job.Run(isolate_->async_counters());
      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
    }
    {
      // Scavenge weak global handles.
      TRACE_GC(heap_->tracer(),
               GCTracer::Scope::SCAVENGER_SCAVENGE_WEAK_GLOBAL_HANDLES_PROCESS);
      isolate_->global_handles()->MarkNewSpaceWeakUnmodifiedObjectsPending(
          &IsUnscavengedHeapObject);
      isolate_->global_handles()
          ->IterateNewSpaceWeakUnmodifiedRootsForFinalizers(
              &root_scavenge_visitor);
      scavengers[kMainThreadId]->Process();

      DCHECK(copied_list.IsEmpty());
      DCHECK(promotion_list.IsEmpty());
      isolate_->global_handles()
          ->IterateNewSpaceWeakUnmodifiedRootsForPhantomHandles(
              &root_scavenge_visitor, &IsUnscavengedHeapObject);
    }

    {
      // Finalize parallel scavenging.
      TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_FINALIZE);

      for (int i = 0; i < num_scavenge_tasks; i++) {
        scavengers[i]->Finalize();
        delete scavengers[i];
      }

      HandleSurvivingNewLargeObjects();
    }
  }

  {
    // Update references into new space.
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_SCAVENGE_UPDATE_REFS);
    heap_->UpdateNewSpaceReferencesInExternalStringTable(
        &Heap::UpdateNewSpaceReferenceInExternalStringTableEntry);

    heap_->incremental_marking()->UpdateMarkingWorklistAfterScavenge();
  }

  if (FLAG_concurrent_marking) {
    // Ensure that the concurrent marker does not track pages that are
    // going to be unmapped.
    for (Page* p :
         PageRange(heap_->new_space()->from_space().first_page(), nullptr)) {
      heap_->concurrent_marking()->ClearLiveness(p);
    }
  }

  ScavengeWeakObjectRetainer weak_object_retainer;
  heap_->ProcessYoungWeakReferences(&weak_object_retainer);

  // Set age mark.
  heap_->new_space_->set_age_mark(heap_->new_space()->top());

  {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::SCAVENGER_PROCESS_ARRAY_BUFFERS);
    ArrayBufferTracker::PrepareToFreeDeadInNewSpace(heap_);
  }
  heap_->array_buffer_collector()->FreeAllocations();

  // Since we promote all surviving large objects immediately, all remaining
  // large objects must be dead.
  // TODO(hpayer): Don't free all as soon as we have an intermediate generation.
  heap_->new_lo_space()->FreeAllObjects();

  RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
    if (chunk->SweepingDone()) {
      RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
    } else {
      RememberedSet<OLD_TO_NEW>::PreFreeEmptyBuckets(chunk);
    }
  });

  // Update how much has survived scavenge.
  heap_->IncrementYoungSurvivorsCounter(heap_->SurvivedNewSpaceObjectSize());
}

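// Editorial note: surviving new-space large objects were recorded together
// with their map in surviving_new_large_objects_. The map is re-installed
// before the page is promoted so that size metadata is available during page
// promotion (see the comment inside the loop).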
void ScavengerCollector::HandleSurvivingNewLargeObjects() {
  for (SurvivingNewLargeObjectMapEntry update_info :
       surviving_new_large_objects_) {
    HeapObject* object = update_info.first;
    Map map = update_info.second;
    // Order is important here. We have to re-install the map to have access
    // to meta-data like size during page promotion.
    object->set_map_word(MapWord::FromMap(map));
    LargePage* page = LargePage::FromHeapObject(object);
    heap_->lo_space()->PromoteNewLargeObject(page);
  }
  surviving_new_large_objects_.clear();
}

void ScavengerCollector::MergeSurvivingNewLargeObjects(
    const SurvivingNewLargeObjectsMap& objects) {
  for (SurvivingNewLargeObjectMapEntry object : objects) {
    bool success = surviving_new_large_objects_.insert(object).second;
    USE(success);
    DCHECK(success);
  }
}

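// Editorial note: heuristic is roughly one task per MB of new-space capacity,
// capped by kMaxScavengerTasks and by the number of worker threads + 1, and
// forced down to a single task when the old generation is close to its limit.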
int ScavengerCollector::NumberOfScavengeTasks() {
  if (!FLAG_parallel_scavenge) return 1;
  const int num_scavenge_tasks =
      static_cast<int>(heap_->new_space()->TotalCapacity()) / MB;
  static int num_cores = V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
  int tasks =
      Max(1, Min(Min(num_scavenge_tasks, kMaxScavengerTasks), num_cores));
  if (!heap_->CanExpandOldGeneration(
          static_cast<size_t>(tasks * Page::kPageSize))) {
    // Optimize for memory usage near the heap limit.
    tasks = 1;
  }
  return tasks;
}

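// Editorial note: each Scavenger is a per-task object. It is constructed with
// task-local views of the shared copied and promotion worklists and keeps its
// statistics and pretenuring feedback local until Finalize() merges them back
// into the heap.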
Scavenger::Scavenger(ScavengerCollector* collector, Heap* heap, bool is_logging,
                     CopiedList* copied_list, PromotionList* promotion_list,
                     int task_id)
    : collector_(collector),
      heap_(heap),
      promotion_list_(promotion_list, task_id),
      copied_list_(copied_list, task_id),
      local_pretenuring_feedback_(kInitialLocalPretenuringFeedbackCapacity),
      copied_size_(0),
      promoted_size_(0),
      allocator_(heap),
      is_logging_(is_logging),
      is_incremental_marking_(heap->incremental_marking()->IsMarking()),
      is_compacting_(heap->incremental_marking()->IsCompacting()) {}

void Scavenger::IterateAndScavengePromotedObject(HeapObject* target, Map map,
                                                 int size) {
  // We are not collecting slots on new space objects during mutation, thus we
  // have to scan for pointers to evacuation candidates when we promote
  // objects. But we should not record any slots in non-black objects. A grey
  // object's slots would be rescanned, and a white object might not survive
  // until the end of the collection, so recording its slots would violate the
  // invariant.
  const bool record_slots =
      is_compacting_ &&
      heap()->incremental_marking()->atomic_marking_state()->IsBlack(target);
  IterateAndScavengePromotedObjectsVisitor visitor(heap(), this, record_slots);
  target->IterateBodyFast(map, size, &visitor);
}

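// Editorial note: hands unswept old-space pages back to the sweeper. This is
// the counterpart of the FilterOldSpaceSweepingPages call in CollectGarbage(),
// which temporarily removed such pages so the scavenger could process their
// slots without locking.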
void Scavenger::AddPageToSweeperIfNecessary(MemoryChunk* page) {
  AllocationSpace space = page->owner()->identity();
  if ((space == OLD_SPACE) && !page->SweepingDone()) {
    heap()->mark_compact_collector()->sweeper()->AddPage(
        space, reinterpret_cast<Page*>(page),
        Sweeper::READD_TEMPORARY_REMOVED_PAGE);
  }
}

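// Editorial note: scans one page's OLD_TO_NEW remembered set, both regular
// and typed slots, and scavenges any new-space objects they point to. The
// KEEP_SLOT / REMOVE_SLOT result of each callback decides whether the slot
// stays in the set.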
void Scavenger::ScavengePage(MemoryChunk* page) {
  CodePageMemoryModificationScope memory_modification_scope(page);
  RememberedSet<OLD_TO_NEW>::Iterate(
      page,
      [this](MaybeObjectSlot addr) {
        return CheckAndScavengeObject(heap_, addr);
      },
      SlotSet::KEEP_EMPTY_BUCKETS);
  RememberedSet<OLD_TO_NEW>::IterateTyped(
      page, [this](SlotType type, Address host_addr, Address addr) {
        return UpdateTypedSlotHelper::UpdateTypedSlot(
            heap_, type, addr, [this](MaybeObjectSlot slot) {
              return CheckAndScavengeObject(heap(), slot);
            });
      });

  AddPageToSweeperIfNecessary(page);
}

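// Editorial note: drains this task's worklists. The copied list (objects that
// stayed in new space) is drained while ShouldEagerlyProcessPromotionList()
// allows it; the promotion list (objects moved to the old generation, whose
// fields still need scanning) is drained afterwards. The outer loop repeats
// until both lists stay empty. When a barrier is given, the task notifies
// waiting tasks every kInterruptThreshold objects if the global pool still
// has work.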
void Scavenger::Process(OneshotBarrier* barrier) {
  ScavengeVisitor scavenge_visitor(this);

  const bool have_barrier = barrier != nullptr;
  bool done;
  size_t objects = 0;
  do {
    done = true;
    ObjectAndSize object_and_size;
    while (promotion_list_.ShouldEagerlyProcessPromotionList() &&
           copied_list_.Pop(&object_and_size)) {
      scavenge_visitor.Visit(object_and_size.first);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!copied_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }

    struct PromotionListEntry entry;
    while (promotion_list_.Pop(&entry)) {
      HeapObject* target = entry.heap_object;
      DCHECK(!target->IsMap());
      IterateAndScavengePromotedObject(target, entry.map, entry.size);
      done = false;
      if (have_barrier && ((++objects % kInterruptThreshold) == 0)) {
        if (!promotion_list_.IsGlobalPoolEmpty()) {
          barrier->NotifyAll();
        }
      }
    }
  } while (!done);
}

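// Editorial note: merges this task's pretenuring feedback, copy/promotion
// counters, and surviving large objects back into the shared heap state, and
// finalizes its local allocator.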
void Scavenger::Finalize() {
  heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
  heap()->IncrementSemiSpaceCopiedObjectSize(copied_size_);
  heap()->IncrementPromotedObjectsSize(promoted_size_);
  collector_->MergeSurvivingNewLargeObjects(surviving_new_large_objects_);
  allocator_.Finalize();
}

void RootScavengeVisitor::VisitRootPointer(Root root, const char* description,
                                           ObjectSlot p) {
  DCHECK(!HasWeakHeapObjectTag(*p));
  ScavengePointer(p);
}

void RootScavengeVisitor::VisitRootPointers(Root root, const char* description,
                                            ObjectSlot start, ObjectSlot end) {
  // Copy all HeapObject pointers in [start, end).
  for (ObjectSlot p = start; p < end; ++p) ScavengePointer(p);
}

void RootScavengeVisitor::ScavengePointer(ObjectSlot p) {
  Object* object = *p;
  DCHECK(!HasWeakHeapObjectTag(object));
  if (!Heap::InNewSpace(object)) return;

  scavenger_->ScavengeObject(HeapObjectSlot(p),
                             reinterpret_cast<HeapObject*>(object));
}

RootScavengeVisitor::RootScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

ScavengeVisitor::ScavengeVisitor(Scavenger* scavenger)
    : scavenger_(scavenger) {}

}  // namespace internal
}  // namespace v8
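
Editorial note: the ScavengingTask / Scavenger::Process pair above implements a "drain worklists until every task is idle" protocol. The stand-alone sketch below is not V8 code; every name in it is invented for illustration. It models the same idea with the standard library only: workers pop items from a shared list, processing an item may publish new items, and the phase ends once no items remain outstanding. V8 uses segmented, task-local worklists and a OneshotBarrier instead of the mutex-protected vector and atomic counter used here.

// scavenge_worklist_sketch.cc -- illustrative analogue only, NOT part of V8.
#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

namespace {

struct WorkItem { int depth; };    // Stand-in for "object to scavenge".

std::mutex g_mutex;                // Guards the shared worklist.
std::vector<WorkItem> g_worklist;  // Analogue of the copied/promotion lists.
std::atomic<int> g_outstanding{0}; // Items pushed but not yet fully processed.
std::atomic<int> g_processed{0};

void Push(WorkItem item) {
  ++g_outstanding;
  std::lock_guard<std::mutex> lock(g_mutex);
  g_worklist.push_back(item);
}

bool Pop(WorkItem* item) {
  std::lock_guard<std::mutex> lock(g_mutex);
  if (g_worklist.empty()) return false;
  *item = g_worklist.back();
  g_worklist.pop_back();
  return true;
}

// Processing an item may discover more work, the way scavenging a promoted
// object discovers more new-space objects through its pointer fields.
void ProcessItem(const WorkItem& item) {
  ++g_processed;
  if (item.depth > 0) {
    Push({item.depth - 1});
    Push({item.depth - 1});
  }
  --g_outstanding;  // Only after newly discovered work has been published.
}

void Worker() {
  WorkItem item;
  // A worker only retires once nothing is outstanding, mirroring the role of
  // the OneshotBarrier: the phase is over when everyone has run dry.
  while (g_outstanding.load() > 0) {
    if (Pop(&item)) {
      ProcessItem(item);
    } else {
      std::this_thread::yield();  // Another worker may still produce work.
    }
  }
}

}  // namespace

int main() {
  Push({/*depth=*/10});  // Seed the worklist, like scavenging the roots.
  std::vector<std::thread> workers;
  for (int i = 0; i < 4; i++) workers.emplace_back(Worker);
  for (std::thread& t : workers) t.join();
  // Each item of depth d spawns two items of depth d - 1, so this prints
  // 2^11 - 1 = 2047 for a seed depth of 10.
  std::printf("processed %d items\n", g_processed.load());
  return 0;
}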