V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
incremental-marking.cc
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/incremental-marking.h"
6 
7 #include "src/code-stubs.h"
8 #include "src/compilation-cache.h"
9 #include "src/conversions.h"
10 #include "src/heap/concurrent-marking.h"
11 #include "src/heap/embedder-tracing.h"
12 #include "src/heap/gc-idle-time-handler.h"
13 #include "src/heap/gc-tracer.h"
14 #include "src/heap/heap-inl.h"
15 #include "src/heap/incremental-marking-inl.h"
16 #include "src/heap/mark-compact-inl.h"
17 #include "src/heap/object-stats.h"
18 #include "src/heap/objects-visiting-inl.h"
19 #include "src/heap/objects-visiting.h"
20 #include "src/heap/sweeper.h"
21 #include "src/objects/hash-table-inl.h"
22 #include "src/objects/slots-inl.h"
23 #include "src/tracing/trace-event.h"
24 #include "src/v8.h"
25 #include "src/visitors.h"
26 #include "src/vm-state-inl.h"
27 
28 namespace v8 {
29 namespace internal {
30 
31 using IncrementalMarkingMarkingVisitor =
32  MarkingVisitor<FixedArrayVisitationMode::kIncremental,
33  TraceRetainingPathMode::kDisabled,
34  IncrementalMarking::MarkingState>;
35 
36 void IncrementalMarking::Observer::Step(int bytes_allocated, Address addr,
37  size_t size) {
38  Heap* heap = incremental_marking_.heap();
39  VMState<GC> state(heap->isolate());
40  RuntimeCallTimerScope runtime_timer(
41  heap->isolate(),
42  RuntimeCallCounterId::kGC_Custom_IncrementalMarkingObserver);
43  incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
44  if (incremental_marking_.black_allocation() && addr != kNullAddress) {
45  // AdvanceIncrementalMarkingOnAllocation can start black allocation.
46  // Ensure that the new object is marked black.
47  HeapObject* object = HeapObject::FromAddress(addr);
48  if (incremental_marking_.marking_state()->IsWhite(object) &&
49  !(Heap::InNewSpace(object) || heap->new_lo_space()->Contains(object))) {
50  if (heap->IsLargeObject(object)) {
51  incremental_marking_.marking_state()->WhiteToBlack(object);
52  } else {
53  Page::FromAddress(addr)->CreateBlackArea(addr, addr + size);
54  }
55  }
56  }
57 }
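// Note on the fixup above: once black allocation is active, a freshly
// allocated old-generation object must not stay white, since black
// allocation treats new objects as already marked. Regular pages get a
// black area created over [addr, addr + size); large objects are flipped
// directly via their mark bit.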
58 
59 IncrementalMarking::IncrementalMarking(
60  Heap* heap, MarkCompactCollector::MarkingWorklist* marking_worklist,
61  WeakObjects* weak_objects)
62  : heap_(heap),
63  marking_worklist_(marking_worklist),
64  weak_objects_(weak_objects),
65  initial_old_generation_size_(0),
66  bytes_marked_ahead_of_schedule_(0),
67  bytes_marked_concurrently_(0),
68  unscanned_bytes_of_large_object_(0),
69  is_compacting_(false),
70  should_hurry_(false),
71  was_activated_(false),
72  black_allocation_(false),
73  finalize_marking_completed_(false),
74  trace_wrappers_toggle_(false),
75  request_type_(NONE),
76  new_generation_observer_(*this, kYoungGenerationAllocatedThreshold),
77  old_generation_observer_(*this, kOldGenerationAllocatedThreshold) {
78  DCHECK_NOT_NULL(marking_worklist_);
79  SetState(STOPPED);
80 }
81 
82 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
83  HeapObject* value_heap_obj = HeapObject::cast(value);
84  DCHECK(!marking_state()->IsImpossible(value_heap_obj));
85  DCHECK(!marking_state()->IsImpossible(obj));
86 #ifdef V8_CONCURRENT_MARKING
87  // The write barrier stub generated with V8_CONCURRENT_MARKING does not
88  // check the color of the source object.
89  const bool need_recording = true;
90 #else
91  const bool need_recording = marking_state()->IsBlack(obj);
92 #endif
93 
94  if (need_recording && WhiteToGreyAndPush(value_heap_obj)) {
95  RestartIfNotMarking();
96  }
97  return is_compacting_ && need_recording;
98 }
99 
100 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, HeapObjectSlot slot,
101  Object* value) {
102  if (BaseRecordWrite(obj, value) && slot.address() != kNullAddress) {
103  // Object is not going to be rescanned. We need to record the slot.
104  heap_->mark_compact_collector()->RecordSlot(obj, slot,
105  HeapObject::cast(value));
106  }
107 }
108 
109 int IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
110  Address slot_address,
111  Isolate* isolate) {
112  DCHECK(obj->IsHeapObject());
113  MaybeObjectSlot slot(slot_address);
114  isolate->heap()->incremental_marking()->RecordMaybeWeakWrite(obj, slot,
115  *slot);
116  // Called by RecordWriteCodeStubAssembler, which doesn't accept void type
117  return 0;
118 }
119 
120 void IncrementalMarking::RecordWriteIntoCode(Code host, RelocInfo* rinfo,
121  HeapObject* value) {
122  DCHECK(IsMarking());
123  if (BaseRecordWrite(host, value)) {
124  // Object is not going to be rescanned. We need to record the slot.
125  heap_->mark_compact_collector()->RecordRelocSlot(host, rinfo, value);
126  }
127 }
128 
129 bool IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj) {
130  if (marking_state()->WhiteToGrey(obj)) {
131  marking_worklist()->Push(obj);
132  return true;
133  }
134  return false;
135 }
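// Tri-color terminology used throughout this file: white objects have not
// been reached yet, grey objects sit on the marking worklist awaiting a
// scan of their fields, and black objects have been fully scanned.
// WhiteToGreyAndPush() is the basic marking primitive: it attempts the
// white->grey transition and, if it succeeds, enqueues the object.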
136 
137 void IncrementalMarking::MarkBlackAndPush(HeapObject* obj) {
138  // Marking a left-trimmable fixed array black is unsafe because left-trimming
139  // re-pushes only grey arrays onto the marking worklist.
140  DCHECK(!obj->IsFixedArray() && !obj->IsFixedDoubleArray());
141  // Color the object black and push it into the bailout deque.
142  marking_state()->WhiteToGrey(obj);
143  if (marking_state()->GreyToBlack(obj)) {
144  if (FLAG_concurrent_marking) {
145  marking_worklist()->PushBailout(obj);
146  } else {
147  marking_worklist()->Push(obj);
148  }
149  }
150 }
151 
152 void IncrementalMarking::NotifyLeftTrimming(HeapObject* from, HeapObject* to) {
153  DCHECK(IsMarking());
154  DCHECK(MemoryChunk::FromAddress(from->address())->SweepingDone());
155  DCHECK_EQ(MemoryChunk::FromAddress(from->address()),
156  MemoryChunk::FromAddress(to->address()));
157  DCHECK_NE(from, to);
158 
159  MarkBit old_mark_bit = marking_state()->MarkBitFrom(from);
160  MarkBit new_mark_bit = marking_state()->MarkBitFrom(to);
161 
162  if (black_allocation() && Marking::IsBlack<kAtomicity>(new_mark_bit)) {
163  // Nothing to do if the object is in black area.
164  return;
165  }
166 
167  bool marked_black_due_to_left_trimming = false;
168  if (FLAG_concurrent_marking) {
169  // We need to mark the array black before overwriting its map and length
170  // so that the concurrent marker does not observe inconsistent state.
171  Marking::WhiteToGrey<kAtomicity>(old_mark_bit);
172  if (Marking::GreyToBlack<kAtomicity>(old_mark_bit)) {
173  // The concurrent marker will not mark the array. We need to push the
174  // new array start onto the marking deque to ensure that it will be marked.
175  marked_black_due_to_left_trimming = true;
176  }
177  DCHECK(Marking::IsBlack<kAtomicity>(old_mark_bit));
178  }
179 
180  if (Marking::IsBlack<kAtomicity>(old_mark_bit) &&
181  !marked_black_due_to_left_trimming) {
182  // The array was black before left trimming or was marked black by the
183  // concurrent marker. Simply transfer the color.
184  if (from->address() + kPointerSize == to->address()) {
185  // The old and the new markbits overlap. The |to| object has the
186  // grey color. To make it black, we need to set the second bit.
187  DCHECK(new_mark_bit.Get<kAtomicity>());
188  new_mark_bit.Next().Set<kAtomicity>();
189  } else {
190  bool success = Marking::WhiteToBlack<kAtomicity>(new_mark_bit);
191  DCHECK(success);
192  USE(success);
193  }
194  } else if (Marking::IsGrey<kAtomicity>(old_mark_bit) ||
195  marked_black_due_to_left_trimming) {
196  // The array was already grey or was marked black by this function.
197  // Mark the new array grey and push it to marking deque.
198  if (from->address() + kPointerSize == to->address()) {
199  // The old and the new markbits overlap. The |to| object is either white
200  // or grey. Set the first bit to make sure that it is grey.
201  new_mark_bit.Set<kAtomicity>();
202  DCHECK(!new_mark_bit.Next().Get<kAtomicity>());
203  } else {
204  bool success = Marking::WhiteToGrey<kAtomicity>(new_mark_bit);
205  DCHECK(success);
206  USE(success);
207  }
208  // Subsequent left-trimming will re-push only grey arrays.
209  // Ensure that this array is grey.
210  DCHECK(Marking::IsGrey<kAtomicity>(new_mark_bit));
211  marking_worklist()->PushBailout(to);
212  RestartIfNotMarking();
213  }
214 }
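// Left-trimming note: when the trimmed object starts exactly one word
// after the original (from->address() + kPointerSize == to->address()),
// the mark bits of |from| and |to| overlap, so the code above adjusts
// individual bits instead of performing a fresh color transition.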
215 
216 class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
217  public:
218  explicit IncrementalMarkingRootMarkingVisitor(
219  IncrementalMarking* incremental_marking)
220  : heap_(incremental_marking->heap()) {}
221 
222  void VisitRootPointer(Root root, const char* description,
223  ObjectSlot p) override {
224  MarkObjectByPointer(p);
225  }
226 
227  void VisitRootPointers(Root root, const char* description, ObjectSlot start,
228  ObjectSlot end) override {
229  for (ObjectSlot p = start; p < end; ++p) MarkObjectByPointer(p);
230  }
231 
232  private:
233  void MarkObjectByPointer(ObjectSlot p) {
234  Object* obj = *p;
235  if (!obj->IsHeapObject()) return;
236 
237  heap_->incremental_marking()->WhiteToGreyAndPush(HeapObject::cast(obj));
238  }
239 
240  Heap* heap_;
241 };
242 
243 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
244  PagedSpace* space) {
245  for (Page* p : *space) {
246  p->SetOldGenerationPageFlags(false);
247  }
248 }
249 
250 
251 void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
252  NewSpace* space) {
253  for (Page* p : *space) {
254  p->SetYoungGenerationPageFlags(false);
255  }
256 }
257 
258 
259 void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
260  DeactivateIncrementalWriteBarrierForSpace(heap_->old_space());
261  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
262  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
263  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
264 
265  for (LargePage* p : *heap_->lo_space()) {
266  p->SetOldGenerationPageFlags(false);
267  }
268 
269  for (LargePage* p : *heap_->code_lo_space()) {
270  p->SetOldGenerationPageFlags(false);
271  }
272 }
273 
274 
275 void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
276  for (Page* p : *space) {
277  p->SetOldGenerationPageFlags(true);
278  }
279 }
280 
281 
282 void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
283  for (Page* p : *space) {
284  p->SetYoungGenerationPageFlags(true);
285  }
286 }
287 
288 
289 void IncrementalMarking::ActivateIncrementalWriteBarrier() {
290  ActivateIncrementalWriteBarrier(heap_->old_space());
291  ActivateIncrementalWriteBarrier(heap_->map_space());
292  ActivateIncrementalWriteBarrier(heap_->code_space());
293  ActivateIncrementalWriteBarrier(heap_->new_space());
294 
295  for (LargePage* p : *heap_->lo_space()) {
296  p->SetOldGenerationPageFlags(true);
297  }
298 
299  for (LargePage* p : *heap_->code_lo_space()) {
300  p->SetOldGenerationPageFlags(true);
301  }
302 }
303 
304 
305 bool IncrementalMarking::WasActivated() { return was_activated_; }
306 
307 
308 bool IncrementalMarking::CanBeActivated() {
309  // Only start incremental marking in a safe state: 1) when incremental
310  // marking is turned on, 2) when we are currently not in a GC, and
311  // 3) when we are currently not serializing or deserializing the heap.
312  return FLAG_incremental_marking && heap_->gc_state() == Heap::NOT_IN_GC &&
313  heap_->deserialization_complete() &&
314  !heap_->isolate()->serializer_enabled();
315 }
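// A minimal sketch of how a caller elsewhere in the heap might start
// incremental marking through this interface; the GarbageCollectionReason
// value is illustrative only:
//
//   IncrementalMarking* marking = heap->incremental_marking();
//   if (marking->IsStopped() && marking->CanBeActivated()) {
//     marking->Start(GarbageCollectionReason::kAllocationLimit);
//   }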
316 
317 
318 void IncrementalMarking::Deactivate() {
319  DeactivateIncrementalWriteBarrier();
320 }
321 
322 void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
323  if (FLAG_trace_incremental_marking) {
324  int old_generation_size_mb =
325  static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
326  int old_generation_limit_mb =
327  static_cast<int>(heap()->old_generation_allocation_limit() / MB);
328  heap()->isolate()->PrintWithTimestamp(
329  "[IncrementalMarking] Start (%s): old generation %dMB, limit %dMB, "
330  "slack %dMB\n",
331  Heap::GarbageCollectionReasonToString(gc_reason),
332  old_generation_size_mb, old_generation_limit_mb,
333  Max(0, old_generation_limit_mb - old_generation_size_mb));
334  }
335  DCHECK(FLAG_incremental_marking);
336  DCHECK(state_ == STOPPED);
337  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
338  DCHECK(!heap_->isolate()->serializer_enabled());
339 
340  Counters* counters = heap_->isolate()->counters();
341 
342  counters->incremental_marking_reason()->AddSample(
343  static_cast<int>(gc_reason));
344  HistogramTimerScope incremental_marking_scope(
345  counters->gc_incremental_marking_start());
346  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
347  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_START);
348  heap_->tracer()->NotifyIncrementalMarkingStart();
349 
350  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
351  initial_old_generation_size_ = heap_->OldGenerationSizeOfObjects();
352  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
353  bytes_allocated_ = 0;
354  bytes_marked_ahead_of_schedule_ = 0;
355  bytes_marked_concurrently_ = 0;
356  should_hurry_ = false;
357  was_activated_ = true;
358 
359  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
360  StartMarking();
361  } else {
362  if (FLAG_trace_incremental_marking) {
363  heap()->isolate()->PrintWithTimestamp(
364  "[IncrementalMarking] Start sweeping.\n");
365  }
366  SetState(SWEEPING);
367  }
368 
369  heap_->AddAllocationObserversToAllSpaces(&old_generation_observer_,
370  &new_generation_observer_);
371  incremental_marking_job()->Start(heap_);
372 }
373 
374 
375 void IncrementalMarking::StartMarking() {
376  if (heap_->isolate()->serializer_enabled()) {
377  // Black allocation currently starts when we start incremental marking,
378  // but we cannot enable black allocation while deserializing. Hence, we
379  // have to delay the start of incremental marking in that case.
380  if (FLAG_trace_incremental_marking) {
381  heap()->isolate()->PrintWithTimestamp(
382  "[IncrementalMarking] Start delayed - serializer\n");
383  }
384  return;
385  }
386  if (FLAG_trace_incremental_marking) {
387  heap()->isolate()->PrintWithTimestamp(
388  "[IncrementalMarking] Start marking\n");
389  }
390 
391  is_compacting_ =
392  !FLAG_never_compact && heap_->mark_compact_collector()->StartCompaction();
393 
394  SetState(MARKING);
395 
396  {
397  TRACE_GC(heap()->tracer(),
398  GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_PROLOGUE);
399  heap_->local_embedder_heap_tracer()->TracePrologue();
400  }
401 
402  ActivateIncrementalWriteBarrier();
403 
404 // Marking bits are cleared by the sweeper.
405 #ifdef VERIFY_HEAP
406  if (FLAG_verify_heap) {
407  heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
408  }
409 #endif
410 
411  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
412 
413 #ifdef V8_CONCURRENT_MARKING
414  // The write-barrier does not check the color of the source object.
415  // Start black allocation earlier to ensure faster marking progress.
416  if (!black_allocation_) {
417  StartBlackAllocation();
418  }
419 #endif
420 
421  // Mark strong roots grey.
422  IncrementalMarkingRootMarkingVisitor visitor(this);
423  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
424 
425  if (FLAG_concurrent_marking && !heap_->IsTearingDown()) {
426  heap_->concurrent_marking()->ScheduleTasks();
427  }
428 
429  // Ready to start incremental marking.
430  if (FLAG_trace_incremental_marking) {
431  heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Running\n");
432  }
433 }
434 
435 void IncrementalMarking::StartBlackAllocation() {
436  DCHECK(FLAG_black_allocation);
437  DCHECK(!black_allocation_);
438  DCHECK(IsMarking());
439  black_allocation_ = true;
440  heap()->old_space()->MarkLinearAllocationAreaBlack();
441  heap()->map_space()->MarkLinearAllocationAreaBlack();
442  heap()->code_space()->MarkLinearAllocationAreaBlack();
443  if (FLAG_trace_incremental_marking) {
444  heap()->isolate()->PrintWithTimestamp(
445  "[IncrementalMarking] Black allocation started\n");
446  }
447 }
448 
449 void IncrementalMarking::PauseBlackAllocation() {
450  DCHECK(FLAG_black_allocation);
451  DCHECK(IsMarking());
452  heap()->old_space()->UnmarkLinearAllocationArea();
453  heap()->map_space()->UnmarkLinearAllocationArea();
454  heap()->code_space()->UnmarkLinearAllocationArea();
455  if (FLAG_trace_incremental_marking) {
456  heap()->isolate()->PrintWithTimestamp(
457  "[IncrementalMarking] Black allocation paused\n");
458  }
459  black_allocation_ = false;
460 }
461 
462 void IncrementalMarking::FinishBlackAllocation() {
463  if (black_allocation_) {
464  black_allocation_ = false;
465  if (FLAG_trace_incremental_marking) {
466  heap()->isolate()->PrintWithTimestamp(
467  "[IncrementalMarking] Black allocation finished\n");
468  }
469  }
470 }
471 
472 void IncrementalMarking::MarkRoots() {
473  DCHECK(!finalize_marking_completed_);
474  DCHECK(IsMarking());
475 
476  IncrementalMarkingRootMarkingVisitor visitor(this);
477  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
478 }
479 
480 bool IncrementalMarking::ShouldRetainMap(Map map, int age) {
481  if (age == 0) {
482  // The map has aged. Do not retain this map.
483  return false;
484  }
485  Object* constructor = map->GetConstructor();
486  if (!constructor->IsHeapObject() ||
487  marking_state()->IsWhite(HeapObject::cast(constructor))) {
488  // The constructor is dead, no new objects with this map can
489  // be created. Do not retain this map.
490  return false;
491  }
492  return true;
493 }
494 
495 
496 void IncrementalMarking::RetainMaps() {
497  // Do not retain dead maps if the flag disables it or if there is
498  // - memory pressure (reduce_memory_footprint_), or
499  // - a GC requested by tests or dev-tools (abort_incremental_marking_).
500  bool map_retaining_is_disabled = heap()->ShouldReduceMemory() ||
501  FLAG_retain_maps_for_n_gc == 0;
502  WeakArrayList* retained_maps = heap()->retained_maps();
503  int length = retained_maps->length();
504  // The number_of_disposed_maps separates maps in the retained_maps
505  // array that were created before and after context disposal.
506  // We do not age and retain disposed maps to avoid memory leaks.
507  int number_of_disposed_maps = heap()->number_of_disposed_maps_;
508  for (int i = 0; i < length; i += 2) {
509  MaybeObject value = retained_maps->Get(i);
510  HeapObject* map_heap_object;
511  if (!value->GetHeapObjectIfWeak(&map_heap_object)) {
512  continue;
513  }
514  int age = retained_maps->Get(i + 1).ToSmi().value();
515  int new_age;
516  Map map = Map::cast(map_heap_object);
517  if (i >= number_of_disposed_maps && !map_retaining_is_disabled &&
518  marking_state()->IsWhite(map)) {
519  if (ShouldRetainMap(map, age)) {
520  WhiteToGreyAndPush(map);
521  }
522  Object* prototype = map->prototype();
523  if (age > 0 && prototype->IsHeapObject() &&
524  marking_state()->IsWhite(HeapObject::cast(prototype))) {
525  // The prototype is not marked, age the map.
526  new_age = age - 1;
527  } else {
528  // The prototype and the constructor are marked, so this map keeps only
529  // the transition tree alive, not JSObjects. Do not age the map.
530  new_age = age;
531  }
532  } else {
533  new_age = FLAG_retain_maps_for_n_gc;
534  }
535  // Compact the array and update the age.
536  if (new_age != age) {
537  retained_maps->Set(i + 1, MaybeObject::FromSmi(Smi::FromInt(new_age)));
538  }
539  }
540 }
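// Note: heap()->retained_maps() is a WeakArrayList of (weak map, age)
// pairs, which is why the loop above advances by two. Only still-white
// maps at or past the number_of_disposed_maps boundary take part in the
// retain/age logic; everything else simply has its age reset to
// FLAG_retain_maps_for_n_gc.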
541 
542 void IncrementalMarking::FinalizeIncrementally() {
543  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_FINALIZE_BODY);
544  DCHECK(!finalize_marking_completed_);
545  DCHECK(IsMarking());
546 
547  double start = heap_->MonotonicallyIncreasingTimeInMs();
548 
549  // After finishing incremental marking, we try to discover all unmarked
550  // objects to reduce the marking load in the final pause.
551  // 1) We scan and mark the roots again to find all changes to the root set.
552  // 2) Age and retain maps embedded in optimized code.
553  MarkRoots();
554 
555  // Map retaining is needed for performance, not correctness,
556  // so we can do it only once at the beginning of the finalization.
557  RetainMaps();
558 
559  finalize_marking_completed_ = true;
560 
561  if (FLAG_black_allocation && !heap()->ShouldReduceMemory() &&
562  !black_allocation_) {
563  // TODO(hpayer): Move to an earlier point as soon as we make faster marking
564  // progress.
565  StartBlackAllocation();
566  }
567 
568  if (FLAG_trace_incremental_marking) {
569  double end = heap_->MonotonicallyIncreasingTimeInMs();
570  double delta = end - start;
571  heap()->isolate()->PrintWithTimestamp(
572  "[IncrementalMarking] Finalize incrementally spent %.1f ms.\n", delta);
573  }
574 }
575 
576 void IncrementalMarking::UpdateMarkingWorklistAfterScavenge() {
577  if (!IsMarking()) return;
578 
579  Map filler_map = ReadOnlyRoots(heap_).one_pointer_filler_map();
580 
581 #ifdef ENABLE_MINOR_MC
582  MinorMarkCompactCollector::MarkingState* minor_marking_state =
583  heap()->minor_mark_compact_collector()->marking_state();
584 #else
585  void* minor_marking_state = nullptr;
586 #endif // ENABLE_MINOR_MC
587 
588  marking_worklist()->Update([
589 #ifdef DEBUG
590  // |this| is referenced inside the DCHECK.
591  this,
592 #endif
593  filler_map, minor_marking_state](
594  HeapObject* obj, HeapObject** out) -> bool {
595  DCHECK(obj->IsHeapObject());
596  // Only pointers to from space have to be updated.
597  if (Heap::InFromSpace(obj)) {
598  MapWord map_word = obj->map_word();
599  if (!map_word.IsForwardingAddress()) {
600  // There may be objects on the marking deque that do not exist anymore,
601  // e.g. left trimmed objects or objects from the root set (frames).
602  // If these objects are dead at scavenging time, their marking deque
603  // entries will not point to forwarding addresses. Hence, we can discard
604  // them.
605  return false;
606  }
607  HeapObject* dest = map_word.ToForwardingAddress();
608  DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
609  *out = dest;
610  return true;
611  } else if (Heap::InToSpace(obj)) {
612  // The object may be on a page that was moved in new space.
613  DCHECK(
614  Page::FromAddress(obj->address())->IsFlagSet(Page::SWEEP_TO_ITERATE));
615 #ifdef ENABLE_MINOR_MC
616  if (minor_marking_state->IsGrey(obj)) {
617  *out = obj;
618  return true;
619  }
620 #endif // ENABLE_MINOR_MC
621  return false;
622  } else {
623  // The object may be on a page that was moved from new to old space. Only
624  // applicable during minor MC garbage collections.
625  if (Page::FromAddress(obj->address())
626  ->IsFlagSet(Page::SWEEP_TO_ITERATE)) {
627 #ifdef ENABLE_MINOR_MC
628  if (minor_marking_state->IsGrey(obj)) {
629  *out = obj;
630  return true;
631  }
632 #endif // ENABLE_MINOR_MC
633  return false;
634  }
635  DCHECK_IMPLIES(marking_state()->IsWhite(obj), obj->IsFiller());
636  // Skip one-word filler objects that appear on the
637  // stack when we perform an in-place array shift.
638  if (obj->map() != filler_map) {
639  *out = obj;
640  return true;
641  }
642  return false;
643  }
644  });
645 
646  UpdateWeakReferencesAfterScavenge();
647 }
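// The Update() predicate above distinguishes three cases: entries pointing
// into from-space are rewritten to their forwarding address (or dropped if
// the object died), entries in to-space or on promoted SWEEP_TO_ITERATE
// pages survive only if the minor collector marked them grey, and other
// old-space entries survive unless they are one-word fillers.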
648 
649 namespace {
650 template <typename T, typename = typename std::enable_if<
651  std::is_base_of<HeapObject, T>::value>::type>
652 T* ForwardingAddress(T* heap_obj) {
653  MapWord map_word = heap_obj->map_word();
654 
655  if (map_word.IsForwardingAddress()) {
656  return T::cast(map_word.ToForwardingAddress());
657  } else if (Heap::InNewSpace(heap_obj)) {
658  return nullptr;
659  } else {
660  return heap_obj;
661  }
662 }
663 
664 // TODO(3770): Replacement for the above.
665 template <typename T, typename = typename std::enable_if<
666  std::is_base_of<HeapObjectPtr, T>::value>::type>
667 T ForwardingAddress(T heap_obj) {
668  MapWord map_word = heap_obj->map_word();
669 
670  if (map_word.IsForwardingAddress()) {
671  return T::cast(map_word.ToForwardingAddress());
672  } else if (Heap::InNewSpace(heap_obj)) {
673  return T();
674  } else {
675  return heap_obj;
676  }
677 }
678 } // namespace
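// ForwardingAddress() yields the new location of an object moved by the
// scavenger, nullptr (or an empty handle in the HeapObjectPtr overload)
// for a new-space object that did not survive, and the object itself when
// it was not moved. The weak-reference updates below rely on this to drop
// entries for dead objects.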
679 
680 void IncrementalMarking::UpdateWeakReferencesAfterScavenge() {
681  weak_objects_->weak_references.Update(
682  [](std::pair<HeapObject*, HeapObjectSlot> slot_in,
683  std::pair<HeapObject*, HeapObjectSlot>* slot_out) -> bool {
684  HeapObject* heap_obj = slot_in.first;
685  HeapObject* forwarded = ForwardingAddress(heap_obj);
686 
687  if (forwarded) {
688  ptrdiff_t distance_to_slot = slot_in.second.address() -
689  reinterpret_cast<Address>(slot_in.first);
690  Address new_slot =
691  reinterpret_cast<Address>(forwarded) + distance_to_slot;
692  slot_out->first = forwarded;
693  slot_out->second = HeapObjectSlot(new_slot);
694  return true;
695  }
696 
697  return false;
698  });
699  weak_objects_->weak_objects_in_code.Update(
700  [](std::pair<HeapObject*, Code> slot_in,
701  std::pair<HeapObject*, Code>* slot_out) -> bool {
702  HeapObject* heap_obj = slot_in.first;
703  HeapObject* forwarded = ForwardingAddress(heap_obj);
704 
705  if (forwarded) {
706  slot_out->first = forwarded;
707  slot_out->second = slot_in.second;
708  return true;
709  }
710 
711  return false;
712  });
713  weak_objects_->ephemeron_hash_tables.Update(
714  [](EphemeronHashTable slot_in, EphemeronHashTable* slot_out) -> bool {
715  EphemeronHashTable forwarded = ForwardingAddress(slot_in);
716 
717  if (!forwarded.is_null()) {
718  *slot_out = forwarded;
719  return true;
720  }
721 
722  return false;
723  });
724 
725  auto ephemeron_updater = [](Ephemeron slot_in, Ephemeron* slot_out) -> bool {
726  HeapObject* key = slot_in.key;
727  HeapObject* value = slot_in.value;
728  HeapObject* forwarded_key = ForwardingAddress(key);
729  HeapObject* forwarded_value = ForwardingAddress(value);
730 
731  if (forwarded_key && forwarded_value) {
732  *slot_out = Ephemeron{forwarded_key, forwarded_value};
733  return true;
734  }
735 
736  return false;
737  };
738 
739  weak_objects_->current_ephemerons.Update(ephemeron_updater);
740  weak_objects_->next_ephemerons.Update(ephemeron_updater);
741  weak_objects_->discovered_ephemerons.Update(ephemeron_updater);
742 }
743 
744 void IncrementalMarking::UpdateMarkedBytesAfterScavenge(
745  size_t dead_bytes_in_new_space) {
746  if (!IsMarking()) return;
747  bytes_marked_ahead_of_schedule_ -=
748  Min(bytes_marked_ahead_of_schedule_, dead_bytes_in_new_space);
749 }
750 
751 bool IncrementalMarking::IsFixedArrayWithProgressBar(HeapObject* obj) {
752  if (!obj->IsFixedArray()) return false;
753  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
754  return chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR);
755 }
756 
757 int IncrementalMarking::VisitObject(Map map, HeapObject* obj) {
758  DCHECK(marking_state()->IsGrey(obj) || marking_state()->IsBlack(obj));
759  if (!marking_state()->GreyToBlack(obj)) {
760  // The object can already be black in these cases:
761  // 1. The object is a fixed array with the progress bar.
762  // 2. The object is a JSObject that was colored black before
763  // unsafe layout change.
764  // 3. The object is a string that was colored black before
765  // unsafe layout change.
766  // 4. The object is materialized by the deoptimizer.
767  DCHECK(obj->IsHashTable() || obj->IsPropertyArray() ||
768  obj->IsFixedArray() || obj->IsContext() || obj->IsJSObject() ||
769  obj->IsString());
770  }
771  DCHECK(marking_state()->IsBlack(obj));
772  WhiteToGreyAndPush(map);
773  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
774  marking_state());
775  return visitor.Visit(map, obj);
776 }
777 
778 void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
779  if (IsMarking() && marking_state()->IsBlack(obj)) {
780  RevisitObject(obj);
781  }
782 }
783 
784 void IncrementalMarking::RevisitObject(HeapObject* obj) {
785  DCHECK(IsMarking());
786  DCHECK(FLAG_concurrent_marking || marking_state()->IsBlack(obj));
787  Page* page = Page::FromAddress(obj->address());
788  if (page->owner()->identity() == LO_SPACE) {
789  page->ResetProgressBar();
790  }
791  Map map = obj->map();
792  WhiteToGreyAndPush(map);
793  IncrementalMarkingMarkingVisitor visitor(heap()->mark_compact_collector(),
794  marking_state());
795  visitor.Visit(map, obj);
796 }
797 
798 template <WorklistToProcess worklist_to_process>
799 intptr_t IncrementalMarking::ProcessMarkingWorklist(
800  intptr_t bytes_to_process, ForceCompletionAction completion) {
801  intptr_t bytes_processed = 0;
802  while (bytes_processed < bytes_to_process || completion == FORCE_COMPLETION) {
803  HeapObject* obj;
804  if (worklist_to_process == WorklistToProcess::kBailout) {
805  obj = marking_worklist()->PopBailout();
806  } else {
807  obj = marking_worklist()->Pop();
808  }
809  if (obj == nullptr) break;
810  // Left trimming may result in white, grey, or black filler objects on the
811  // marking deque. Ignore these objects.
812  if (obj->IsFiller()) {
813  DCHECK(!marking_state()->IsImpossible(obj));
814  continue;
815  }
816  unscanned_bytes_of_large_object_ = 0;
817  int size = VisitObject(obj->map(), obj);
818  bytes_processed += size - unscanned_bytes_of_large_object_;
819  }
820  return bytes_processed;
821 }
822 
823 void IncrementalMarking::EmbedderStep(double duration_ms) {
824  constexpr size_t kObjectsToProcessBeforeInterrupt = 500;
825 
826  TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_INCREMENTAL_EMBEDDER_TRACING);
827  double deadline = heap_->MonotonicallyIncreasingTimeInMs() + duration_ms;
828  bool empty_worklist;
829  do {
830  {
831  LocalEmbedderHeapTracer::ProcessingScope scope(
832  heap_->local_embedder_heap_tracer());
833  HeapObject* object;
834  size_t cnt = 0;
835  empty_worklist = true;
836  while (marking_worklist()->embedder()->Pop(0, &object)) {
837  scope.TracePossibleWrapper(JSObject::cast(object));
838  if (++cnt == kObjectsToProcessBeforeInterrupt) {
839  cnt = 0;
840  empty_worklist = false;
841  break;
842  }
843  }
844  }
845  heap_->local_embedder_heap_tracer()->Trace(deadline);
846  } while (!empty_worklist &&
847  (heap_->MonotonicallyIncreasingTimeInMs() < deadline));
848  heap_->local_embedder_heap_tracer()->SetEmbedderWorklistEmpty(empty_worklist);
849 }
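// EmbedderStep() drains wrapper objects from the embedder worklist in
// batches of kObjectsToProcessBeforeInterrupt, hands them to the embedder
// tracer, and then lets the tracer run until the deadline derived from
// |duration_ms| expires.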
850 
851 void IncrementalMarking::Hurry() {
852  // A scavenge may have pushed new objects on the marking deque (due to black
853  // allocation) even in COMPLETE state. This may happen if scavenges are
854  // forced e.g. in tests. It should not happen when COMPLETE was set when
855  // incremental marking finished and a regular GC was triggered after that
856  // because should_hurry_ will force a full GC.
857  if (!marking_worklist()->IsEmpty()) {
858  double start = 0.0;
859  if (FLAG_trace_incremental_marking) {
860  start = heap_->MonotonicallyIncreasingTimeInMs();
861  if (FLAG_trace_incremental_marking) {
862  heap()->isolate()->PrintWithTimestamp("[IncrementalMarking] Hurry\n");
863  }
864  }
865  // TODO(gc) hurry can mark objects it encounters black as mutator
866  // was stopped.
867  ProcessMarkingWorklist(0, FORCE_COMPLETION);
868  SetState(COMPLETE);
869  if (FLAG_trace_incremental_marking) {
870  double end = heap_->MonotonicallyIncreasingTimeInMs();
871  double delta = end - start;
872  if (FLAG_trace_incremental_marking) {
873  heap()->isolate()->PrintWithTimestamp(
874  "[IncrementalMarking] Complete (hurry), spent %d ms.\n",
875  static_cast<int>(delta));
876  }
877  }
878  }
879 }
880 
881 
882 void IncrementalMarking::Stop() {
883  if (IsStopped()) return;
884  if (FLAG_trace_incremental_marking) {
885  int old_generation_size_mb =
886  static_cast<int>(heap()->OldGenerationSizeOfObjects() / MB);
887  int old_generation_limit_mb =
888  static_cast<int>(heap()->old_generation_allocation_limit() / MB);
889  heap()->isolate()->PrintWithTimestamp(
890  "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
891  "overshoot %dMB\n",
892  old_generation_size_mb, old_generation_limit_mb,
893  Max(0, old_generation_size_mb - old_generation_limit_mb));
894  }
895 
896  SpaceIterator it(heap_);
897  while (it.has_next()) {
898  Space* space = it.next();
899  if (space == heap_->new_space()) {
900  space->RemoveAllocationObserver(&new_generation_observer_);
901  } else {
902  space->RemoveAllocationObserver(&old_generation_observer_);
903  }
904  }
905 
906  IncrementalMarking::set_should_hurry(false);
907  heap_->isolate()->stack_guard()->ClearGC();
908  SetState(STOPPED);
909  is_compacting_ = false;
910  FinishBlackAllocation();
911 }
912 
913 
914 void IncrementalMarking::Finalize() {
915  Hurry();
916  Stop();
917 }
918 
919 
920 void IncrementalMarking::FinalizeMarking(CompletionAction action) {
921  DCHECK(!finalize_marking_completed_);
922  if (FLAG_trace_incremental_marking) {
923  heap()->isolate()->PrintWithTimestamp(
924  "[IncrementalMarking] requesting finalization of incremental "
925  "marking.\n");
926  }
927  request_type_ = FINALIZATION;
928  if (action == GC_VIA_STACK_GUARD) {
929  heap_->isolate()->stack_guard()->RequestGC();
930  }
931 }
932 
933 
934 void IncrementalMarking::MarkingComplete(CompletionAction action) {
935  SetState(COMPLETE);
936  // We will set the stack guard to request a GC now. This will mean the rest
937  // of the GC gets performed as soon as possible (we can't do a GC here in a
938  // record-write context). If a few things get allocated between now and then
939  // that shouldn't make us do a scavenge and keep being incremental, so we set
940  // the should-hurry flag to indicate that there can't be much work left to do.
941  set_should_hurry(true);
942  if (FLAG_trace_incremental_marking) {
943  heap()->isolate()->PrintWithTimestamp(
944  "[IncrementalMarking] Complete (normal).\n");
945  }
946  request_type_ = COMPLETE_MARKING;
947  if (action == GC_VIA_STACK_GUARD) {
948  heap_->isolate()->stack_guard()->RequestGC();
949  }
950 }
951 
952 
953 void IncrementalMarking::Epilogue() {
954  was_activated_ = false;
955  finalize_marking_completed_ = false;
956 }
957 
958 bool IncrementalMarking::ShouldDoEmbedderStep() {
959  return state_ == MARKING && FLAG_incremental_marking_wrappers &&
960  heap_->local_embedder_heap_tracer()->InUse();
961 }
962 
963 double IncrementalMarking::AdvanceIncrementalMarking(
964  double deadline_in_ms, CompletionAction completion_action,
965  StepOrigin step_origin) {
966  HistogramTimerScope incremental_marking_scope(
967  heap_->isolate()->counters()->gc_incremental_marking());
968  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
969  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
970  DCHECK(!IsStopped());
971 
972  double remaining_time_in_ms = 0.0;
973  do {
974  if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
975  EmbedderStep(kStepSizeInMs);
976  } else {
977  const intptr_t step_size_in_bytes =
978  GCIdleTimeHandler::EstimateMarkingStepSize(
979  kStepSizeInMs,
980  heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
981  Step(step_size_in_bytes, completion_action, step_origin);
982  }
983  trace_wrappers_toggle_ = !trace_wrappers_toggle_;
984  remaining_time_in_ms =
985  deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
986  } while (remaining_time_in_ms > kStepSizeInMs && !IsComplete() &&
987  !marking_worklist()->IsEmpty());
988  return remaining_time_in_ms;
989 }
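// AdvanceIncrementalMarking() alternates (when an embedder tracer is in
// use) between embedder tracing steps and V8 marking steps of roughly
// kStepSizeInMs each, until the deadline is within one step, marking is
// complete, or the worklist runs dry. It returns the time remaining before
// |deadline_in_ms|.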
990 
991 
992 void IncrementalMarking::FinalizeSweeping() {
993  DCHECK(state_ == SWEEPING);
994  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
995  (!FLAG_concurrent_sweeping ||
996  !heap_->mark_compact_collector()->sweeper()->AreSweeperTasksRunning())) {
997  heap_->mark_compact_collector()->EnsureSweepingCompleted();
998  }
999  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
1000 #ifdef DEBUG
1001  heap_->VerifyCountersAfterSweeping();
1002 #endif
1003  StartMarking();
1004  }
1005 }
1006 
1007 size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
1008  // Update bytes_allocated_ based on the allocation counter.
1009  size_t current_counter = heap_->OldGenerationAllocationCounter();
1010  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
1011  old_generation_allocation_counter_ = current_counter;
1012  return bytes_allocated_;
1013 }
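// In other words, bytes_allocated_ accumulates old-generation allocation
// since the last marking step, so marking work is scheduled in proportion
// to how fast the mutator allocates.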
1014 
1015 size_t IncrementalMarking::StepSizeToMakeProgress() {
1016  const size_t kTargetStepCount = 256;
1017  const size_t kTargetStepCountAtOOM = 32;
1018  const size_t kMaxStepSizeInByte = 256 * KB;
1019  size_t oom_slack = heap()->new_space()->Capacity() + 64 * MB;
1020 
1021  if (!heap()->CanExpandOldGeneration(oom_slack)) {
1022  return heap()->OldGenerationSizeOfObjects() / kTargetStepCountAtOOM;
1023  }
1024 
1025  return Min(Max(initial_old_generation_size_ / kTargetStepCount,
1026  IncrementalMarking::kMinStepSizeInBytes),
1027  kMaxStepSizeInByte);
1028 }
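// Worked example (the sizes, not the constants, are illustrative): with an
// initial old generation of 64 MB and kTargetStepCount of 256, the
// progress step is 64 MB / 256 = 256 KB, clamped to the range
// [kMinStepSizeInBytes, kMaxStepSizeInByte]. When the old generation
// cannot grow by oom_slack, the step becomes 1/32 of the current
// old-generation size instead.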
1029 
1030 void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
1031  // Code using an AlwaysAllocateScope assumes that the GC state does not
1032  // change; that implies that no marking steps must be performed.
1033  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
1034  (state_ != SWEEPING && state_ != MARKING) || heap_->always_allocate()) {
1035  return;
1036  }
1037 
1038  HistogramTimerScope incremental_marking_scope(
1039  heap_->isolate()->counters()->gc_incremental_marking());
1040  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
1041  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
1042 
1043  if (ShouldDoEmbedderStep() && trace_wrappers_toggle_) {
1044  EmbedderStep(kMaxStepSizeInMs);
1045  } else {
1046  size_t bytes_to_process =
1047  StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
1048  if (bytes_to_process >= IncrementalMarking::kMinStepSizeInBytes) {
1049  // The first step after Scavenge will see many allocated bytes.
1050  // Cap the step size to distribute the marking work more uniformly.
1051  size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
1052  kMaxStepSizeInMs,
1053  heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
1054  bytes_to_process = Min(bytes_to_process, max_step_size);
1055  size_t bytes_processed = 0;
1056  if (FLAG_concurrent_marking) {
1057  bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
1058  StepOrigin::kV8, WorklistToProcess::kBailout);
1059  bytes_to_process = (bytes_processed >= bytes_to_process)
1060  ? 0
1061  : bytes_to_process - bytes_processed;
1062  size_t current_bytes_marked_concurrently =
1063  heap()->concurrent_marking()->TotalMarkedBytes();
1064  // The concurrent_marking()->TotalMarkedBytes() is not monotonic for a
1065  // short period of time when a concurrent marking task is finishing.
1066  if (current_bytes_marked_concurrently > bytes_marked_concurrently_) {
1067  bytes_marked_ahead_of_schedule_ +=
1068  current_bytes_marked_concurrently - bytes_marked_concurrently_;
1069  bytes_marked_concurrently_ = current_bytes_marked_concurrently;
1070  }
1071  }
1072  if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
1073  // Steps performed in tasks and concurrently have put us ahead of
1074  // schedule. We skip processing of the marking deque here and thus shift
1075  // marking time from inside V8 to standalone tasks.
1076  bytes_marked_ahead_of_schedule_ -= bytes_to_process;
1077  bytes_processed += bytes_to_process;
1078  bytes_to_process = IncrementalMarking::kMinStepSizeInBytes;
1079  }
1080  bytes_processed += Step(bytes_to_process, GC_VIA_STACK_GUARD,
1081  StepOrigin::kV8, WorklistToProcess::kAll);
1082  bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
1083  }
1084  }
1085  trace_wrappers_toggle_ = !trace_wrappers_toggle_;
1086 }
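// Scheduling summary for the allocation-driven path above: the requested
// step is the bytes allocated since the last step plus the fixed progress
// step, capped by what can be marked in kMaxStepSizeInMs at the measured
// marking speed. With concurrent marking enabled, a bailout-worklist step
// runs first, and bytes marked concurrently since the previous check are
// credited to bytes_marked_ahead_of_schedule_, which can shrink the
// remaining main-thread step down to kMinStepSizeInBytes.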
1087 
1088 size_t IncrementalMarking::Step(size_t bytes_to_process,
1089  CompletionAction action, StepOrigin step_origin,
1090  WorklistToProcess worklist_to_process) {
1091  double start = heap_->MonotonicallyIncreasingTimeInMs();
1092 
1093  if (state_ == SWEEPING) {
1094  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
1095  FinalizeSweeping();
1096  }
1097 
1098  size_t bytes_processed = 0;
1099  if (state_ == MARKING) {
1100  if (FLAG_concurrent_marking) {
1101  heap_->new_space()->ResetOriginalTop();
1102  // It is safe to merge back all objects that were on hold to the shared
1103  // work list at Step because we are at a safepoint where all objects
1104  // are properly initialized.
1105  marking_worklist()->shared()->MergeGlobalPool(
1106  marking_worklist()->on_hold());
1107  }
1108 
1109 // Only print marking worklist in debug mode to save ~40KB of code size.
1110 #ifdef DEBUG
1111  if (FLAG_trace_incremental_marking && FLAG_trace_concurrent_marking &&
1112  FLAG_trace_gc_verbose) {
1113  marking_worklist()->Print();
1114  }
1115 #endif
1116 
1117  if (worklist_to_process == WorklistToProcess::kBailout) {
1118  bytes_processed =
1119  ProcessMarkingWorklist<WorklistToProcess::kBailout>(bytes_to_process);
1120  } else {
1121  bytes_processed =
1122  ProcessMarkingWorklist<WorklistToProcess::kAll>(bytes_to_process);
1123  }
1124 
1125  if (step_origin == StepOrigin::kTask) {
1126  bytes_marked_ahead_of_schedule_ += bytes_processed;
1127  }
1128 
1129  if (marking_worklist()->IsEmpty()) {
1130  if (heap_->local_embedder_heap_tracer()
1131  ->ShouldFinalizeIncrementalMarking()) {
1132  if (!finalize_marking_completed_) {
1133  FinalizeMarking(action);
1134  } else {
1135  MarkingComplete(action);
1136  }
1137  } else {
1138  heap_->local_embedder_heap_tracer()->NotifyV8MarkingWorklistWasEmpty();
1139  }
1140  }
1141  }
1142  if (FLAG_concurrent_marking) {
1143  heap_->concurrent_marking()->RescheduleTasksIfNeeded();
1144  }
1145 
1146  double end = heap_->MonotonicallyIncreasingTimeInMs();
1147  double duration = (end - start);
1148  // Note that we report zero bytes here when sweeping was in progress or
1149  // when we just started incremental marking. In these cases we did not
1150  // process the marking deque.
1151  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
1152  if (FLAG_trace_incremental_marking) {
1153  heap_->isolate()->PrintWithTimestamp(
1154  "[IncrementalMarking] Step %s %" PRIuS "KB (%" PRIuS "KB) in %.1f\n",
1155  step_origin == StepOrigin::kV8 ? "in v8" : "in task",
1156  bytes_processed / KB, bytes_to_process / KB, duration);
1157  }
1158  if (FLAG_trace_concurrent_marking) {
1159  heap_->isolate()->PrintWithTimestamp(
1160  "Concurrently marked %" PRIuS "KB\n",
1161  heap_->concurrent_marking()->TotalMarkedBytes() / KB);
1162  }
1163  return bytes_processed;
1164 }
1165 
1166 } // namespace internal
1167 } // namespace v8