V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
scavenger-inl.h
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_HEAP_SCAVENGER_INL_H_
#define V8_HEAP_SCAVENGER_INL_H_

#include "src/heap/scavenger.h"

#include "src/heap/incremental-marking-inl.h"
#include "src/heap/local-allocator-inl.h"
#include "src/objects-inl.h"
#include "src/objects/map.h"
#include "src/objects/slots-inl.h"

namespace v8 {
namespace internal {

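// Each Scavenger task works through a View, which forwards every operation to
// the shared PromotionList under that task's id.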
void Scavenger::PromotionList::View::PushRegularObject(HeapObject* object,
                                                       int size) {
  promotion_list_->PushRegularObject(task_id_, object, size);
}

void Scavenger::PromotionList::View::PushLargeObject(HeapObject* object,
                                                     Map map, int size) {
  promotion_list_->PushLargeObject(task_id_, object, map, size);
}

bool Scavenger::PromotionList::View::IsEmpty() {
  return promotion_list_->IsEmpty();
}

size_t Scavenger::PromotionList::View::LocalPushSegmentSize() {
  return promotion_list_->LocalPushSegmentSize(task_id_);
}

bool Scavenger::PromotionList::View::Pop(struct PromotionListEntry* entry) {
  return promotion_list_->Pop(task_id_, entry);
}

bool Scavenger::PromotionList::View::IsGlobalPoolEmpty() {
  return promotion_list_->IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::View::ShouldEagerlyProcessPromotionList() {
  return promotion_list_->ShouldEagerlyProcessPromotionList(task_id_);
}

void Scavenger::PromotionList::PushRegularObject(int task_id,
                                                 HeapObject* object,
                                                 int size) {
  regular_object_promotion_list_.Push(task_id, ObjectAndSize(object, size));
}

void Scavenger::PromotionList::PushLargeObject(int task_id, HeapObject* object,
                                               Map map, int size) {
  large_object_promotion_list_.Push(task_id, {object, map, size});
}

bool Scavenger::PromotionList::IsEmpty() {
  return regular_object_promotion_list_.IsEmpty() &&
         large_object_promotion_list_.IsEmpty();
}

size_t Scavenger::PromotionList::LocalPushSegmentSize(int task_id) {
  return regular_object_promotion_list_.LocalPushSegmentSize(task_id) +
         large_object_promotion_list_.LocalPushSegmentSize(task_id);
}

bool Scavenger::PromotionList::Pop(int task_id,
                                   struct PromotionListEntry* entry) {
  ObjectAndSize regular_object;
  if (regular_object_promotion_list_.Pop(task_id, &regular_object)) {
    entry->heap_object = regular_object.first;
    entry->size = regular_object.second;
    entry->map = entry->heap_object->map();
    return true;
  }
  return large_object_promotion_list_.Pop(task_id, entry);
}

bool Scavenger::PromotionList::IsGlobalPoolEmpty() {
  return regular_object_promotion_list_.IsGlobalPoolEmpty() &&
         large_object_promotion_list_.IsGlobalPoolEmpty();
}

bool Scavenger::PromotionList::ShouldEagerlyProcessPromotionList(int task_id) {
  // Threshold at which to prioritize processing of the promotion list. Right
  // now we only look at the regular object list.
  const int kProcessPromotionListThreshold =
      kRegularObjectPromotionListSegmentSize / 2;
  return LocalPushSegmentSize(task_id) < kProcessPromotionListThreshold;
}

// Whitelist of objects that are guaranteed to contain only data.
bool Scavenger::ContainsOnlyData(VisitorId visitor_id) {
  switch (visitor_id) {
    case kVisitSeqOneByteString:
      return true;
    case kVisitSeqTwoByteString:
      return true;
    case kVisitByteArray:
      return true;
    case kVisitFixedDoubleArray:
      return true;
    case kVisitDataObject:
      return true;
    default:
      break;
  }
  return false;
}

void Scavenger::PageMemoryFence(MaybeObject object) {
#ifdef THREAD_SANITIZER
  // Perform a dummy acquire load to tell TSAN that there is no data race
  // with page initialization.
  HeapObject* heap_object;
  if (object->GetHeapObject(&heap_object)) {
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
  }
#endif
}

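// Copies |source| to |target| and publishes the move by installing a
// forwarding map word in the source via a release CAS. Returns false if
// another task won the race and migrated the object first.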
bool Scavenger::MigrateObject(Map map, HeapObject* source, HeapObject* target,
                              int size) {
  // Copy the content of source to target.
  target->set_map_word(MapWord::FromMap(map));
  heap()->CopyBlock(target->address() + kPointerSize,
                    source->address() + kPointerSize, size - kPointerSize);

  ObjectPtr old = source->map_slot().Release_CompareAndSwap(
      map, MapWord::FromForwardingAddress(target).ToMap());
  if (old != map) {
    // Another task already migrated the object.
    return false;
  }

  if (V8_UNLIKELY(is_logging_)) {
    heap()->OnMoveEvent(target, source, size);
  }

  if (is_incremental_marking_) {
    heap()->incremental_marking()->TransferColor(source, target);
  }
  heap()->UpdateAllocationSite(map, source, &local_pretenuring_feedback_);
  return true;
}

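// Tries to copy the object into to-space. On allocation failure the caller is
// expected to fall back to promotion; on losing the migration race the slot is
// updated with the winner's forwarding address.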
CopyAndForwardResult Scavenger::SemiSpaceCopyObject(Map map,
                                                    HeapObjectSlot slot,
                                                    HeapObject* object,
                                                    int object_size) {
  DCHECK(heap()->AllowedToBeMigrated(object, NEW_SPACE));
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(NEW_SPACE, object_size, alignment);

  HeapObject* target = nullptr;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(NEW_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromSpace(*slot));
      return Heap::InToSpace(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);

    copied_list_.Push(ObjectAndSize(target, object_size));
    copied_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_YOUNG_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

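// Tries to copy the object into old space. Promoted objects that may contain
// pointers are pushed onto the promotion list so that their slots are visited
// again later.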
CopyAndForwardResult Scavenger::PromoteObject(Map map, HeapObjectSlot slot,
                                              HeapObject* object,
                                              int object_size) {
  AllocationAlignment alignment = HeapObject::RequiredAlignment(map);
  AllocationResult allocation =
      allocator_.Allocate(OLD_SPACE, object_size, alignment);

  HeapObject* target = nullptr;
  if (allocation.To(&target)) {
    DCHECK(heap()->incremental_marking()->non_atomic_marking_state()->IsWhite(
        target));
    const bool self_success = MigrateObject(map, object, target, object_size);
    if (!self_success) {
      allocator_.FreeLast(OLD_SPACE, target, object_size);
      MapWord map_word = object->synchronized_map_word();
      HeapObjectReference::Update(slot, map_word.ToForwardingAddress());
      DCHECK(!Heap::InFromSpace(*slot));
      return Heap::InToSpace(*slot)
                 ? CopyAndForwardResult::SUCCESS_YOUNG_GENERATION
                 : CopyAndForwardResult::SUCCESS_OLD_GENERATION;
    }
    HeapObjectReference::Update(slot, target);
    if (!ContainsOnlyData(map->visitor_id())) {
      promotion_list_.PushRegularObject(target, object_size);
    }
    promoted_size_ += object_size;
    return CopyAndForwardResult::SUCCESS_OLD_GENERATION;
  }
  return CopyAndForwardResult::FAILURE;
}

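// Translates a successful copy result into a remembered-set action: keep the
// slot if the target stayed in the young generation, remove it otherwise.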
SlotCallbackResult Scavenger::RememberedSetEntryNeeded(
    CopyAndForwardResult result) {
  DCHECK_NE(CopyAndForwardResult::FAILURE, result);
  return result == CopyAndForwardResult::SUCCESS_YOUNG_GENERATION ? KEEP_SLOT
                                                                  : REMOVE_SLOT;
}

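// Young generation large objects are not copied. A surviving large object is
// marked by CAS-ing its map word to a forwarding pointer to itself and is
// recorded in surviving_new_large_objects_; returns true if the object was
// handled here.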
bool Scavenger::HandleLargeObject(Map map, HeapObject* object,
                                  int object_size) {
  // TODO(hpayer): Make this check size based, i.e.
  // object_size > kMaxRegularHeapObjectSize
  if (V8_UNLIKELY(
          FLAG_young_generation_large_objects &&
          MemoryChunk::FromHeapObject(object)->IsInNewLargeObjectSpace())) {
    DCHECK_EQ(NEW_LO_SPACE,
              MemoryChunk::FromHeapObject(object)->owner()->identity());
    if (object->map_slot().Release_CompareAndSwap(
            map, MapWord::FromForwardingAddress(object).ToMap()) == map) {
      surviving_new_large_objects_.insert({object, map});

      if (!ContainsOnlyData(map->visitor_id())) {
        promotion_list_.PushLargeObject(object, map, object_size);
      }
    }
    return true;
  }
  return false;
}

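// Default evacuation path: large objects are handled in place; everything
// else is either copied within the semispaces or promoted to old space, with
// a fatal OOM if all attempts fail.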
SlotCallbackResult Scavenger::EvacuateObjectDefault(Map map,
                                                    HeapObjectSlot slot,
                                                    HeapObject* object,
                                                    int object_size) {
  SLOW_DCHECK(object->SizeFromMap(map) == object_size);
  CopyAndForwardResult result;

  if (HandleLargeObject(map, object, object_size)) {
    return REMOVE_SLOT;
  }

  SLOW_DCHECK(static_cast<size_t>(object_size) <=
              MemoryChunkLayout::AllocatableMemoryInDataPage());

  if (!heap()->ShouldBePromoted(object->address())) {
    // A semi-space copy may fail due to fragmentation. In that case, we
    // try to promote the object.
    result = SemiSpaceCopyObject(map, slot, object, object_size);
    if (result != CopyAndForwardResult::FAILURE) {
      return RememberedSetEntryNeeded(result);
    }
  }

  // We may want to promote this object if the object was already semi-space
  // copied in a previous young generation GC or if the semi-space copy above
  // failed.
  result = PromoteObject(map, slot, object, object_size);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  // If promotion failed, we try to copy the object to the other semi-space.
  result = SemiSpaceCopyObject(map, slot, object, object_size);
  if (result != CopyAndForwardResult::FAILURE) {
    return RememberedSetEntryNeeded(result);
  }

  heap()->FatalProcessOutOfMemory("Scavenger: semi-space copy");
  UNREACHABLE();
}

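// ThinStrings are short-circuited to the internalized string they point to
// when incremental marking is off; otherwise they are evacuated normally.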
SlotCallbackResult Scavenger::EvacuateThinString(Map map, HeapObjectSlot slot,
                                                 ThinString object,
                                                 int object_size) {
  if (!is_incremental_marking_) {
    // The ThinString should die after Scavenge, so avoid writing the proper
    // forwarding pointer and instead just signal the actual object as the
    // forwarded reference.
    String actual = object->actual();
    // ThinStrings always refer to internalized strings, which are always in
    // old space.
    DCHECK(!Heap::InNewSpace(actual));
    slot.StoreHeapObject(actual);
    return REMOVE_SLOT;
  }

  return EvacuateObjectDefault(map, slot, object, object_size);
}

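// Cons strings whose second part is the empty string are shortcut to their
// first part, avoiding evacuation of the wrapper when possible.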
SlotCallbackResult Scavenger::EvacuateShortcutCandidate(Map map,
                                                        HeapObjectSlot slot,
                                                        ConsString object,
                                                        int object_size) {
  DCHECK(IsShortcutCandidate(map->instance_type()));
  if (!is_incremental_marking_ &&
      object->unchecked_second() == ReadOnlyRoots(heap()).empty_string()) {
    HeapObject* first = HeapObject::cast(object->unchecked_first());

    slot.StoreHeapObject(first);

    if (!Heap::InNewSpace(first)) {
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(first).ToMap());
      return REMOVE_SLOT;
    }

    MapWord first_word = first->synchronized_map_word();
    if (first_word.IsForwardingAddress()) {
      HeapObject* target = first_word.ToForwardingAddress();

      slot.StoreHeapObject(target);
      object->map_slot().Release_Store(
          MapWord::FromForwardingAddress(target).ToMap());
      return Heap::InToSpace(target) ? KEEP_SLOT : REMOVE_SLOT;
    }
    Map map = first_word.ToMap();
    SlotCallbackResult result =
        EvacuateObjectDefault(map, slot, first, first->SizeFromMap(map));
    object->map_slot().Release_Store(
        MapWord::FromForwardingAddress(slot.ToHeapObject()).ToMap());
    return result;
  }

  return EvacuateObjectDefault(map, slot, object, object_size);
}

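// Dispatches on the visitor id so that thin strings and shortcut candidates
// take their specialized paths; everything else uses the default evacuation.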
SlotCallbackResult Scavenger::EvacuateObject(HeapObjectSlot slot, Map map,
                                             HeapObject* source) {
  SLOW_DCHECK(Heap::InFromSpace(source));
  SLOW_DCHECK(!MapWord::FromMap(map).IsForwardingAddress());
  int size = source->SizeFromMap(map);
  // Cannot use ::cast() below because that would add checks in debug mode
  // that require re-reading the map.
  switch (map->visitor_id()) {
    case kVisitThinString:
      // At the moment we don't allow weak pointers to thin strings.
      DCHECK(!(*slot)->IsWeak());
      return EvacuateThinString(map, slot, ThinString::unchecked_cast(source),
                                size);
    case kVisitShortcutCandidate:
      DCHECK(!(*slot)->IsWeak());
      // At the moment we don't allow weak pointers to cons strings.
      return EvacuateShortcutCandidate(
          map, slot, ConsString::unchecked_cast(source), size);
    default:
      return EvacuateObjectDefault(map, slot, source, size);
  }
}

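// Entry point for scavenging a single object: if it has already been copied,
// only the slot is updated with the forwarding address; otherwise the object
// is evacuated.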
SlotCallbackResult Scavenger::ScavengeObject(HeapObjectSlot p,
                                             HeapObject* object) {
  DCHECK(Heap::InFromSpace(object));

  // Synchronized load that consumes the publishing CAS of MigrateObject.
  MapWord first_word = object->synchronized_map_word();

  // If the first word is a forwarding address, the object has already been
  // copied.
  if (first_word.IsForwardingAddress()) {
    HeapObject* dest = first_word.ToForwardingAddress();
    DCHECK(Heap::InFromSpace(*p));
    if ((*p)->IsWeak()) {
      p.store(HeapObjectReference::Weak(dest));
    } else {
      DCHECK((*p)->IsStrong());
      p.store(HeapObjectReference::Strong(dest));
    }
    DCHECK_IMPLIES(Heap::InNewSpace(dest),
                   (Heap::InToSpace(dest) ||
                    MemoryChunk::FromHeapObject(dest)->owner()->identity() ==
                        NEW_LO_SPACE));

    return Heap::InToSpace(dest) ? KEEP_SLOT : REMOVE_SLOT;
  }

  Map map = first_word.ToMap();
  // AllocationMementos are unrooted and shouldn't survive a scavenge
  DCHECK_NE(ReadOnlyRoots(heap()).allocation_memento_map(), map);
  // Call the slow part of scavenge object.
  return EvacuateObject(p, map, object);
}

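// Used for remembered-set slots: scavenges the referenced object if it is in
// from-space and reports whether the slot needs to stay in the remembered set.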
SlotCallbackResult Scavenger::CheckAndScavengeObject(Heap* heap,
                                                     MaybeObjectSlot slot) {
  MaybeObject object = *slot;
  if (Heap::InFromSpace(object)) {
    HeapObject* heap_object = object->GetHeapObject();
    DCHECK(heap_object->IsHeapObject());

    SlotCallbackResult result =
        ScavengeObject(HeapObjectSlot(slot), heap_object);
    DCHECK_IMPLIES(result == REMOVE_SLOT,
                   !heap->IsInYoungGeneration((*slot)->GetHeapObject()));
    return result;
  } else if (Heap::InToSpace(object)) {
    // Already updated slot. This can happen when processing of the work list
    // is interleaved with processing roots.
    return KEEP_SLOT;
  }
  // Slots can point to "to" space if the slot has been recorded multiple
  // times in the remembered set. We remove the redundant slot now.
  return REMOVE_SLOT;
}

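// Visitor callbacks for object bodies and roots: every new-space reference in
// the visited range is scavenged; weak references are treated as strong here.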
void ScavengeVisitor::VisitPointers(HeapObject* host, ObjectSlot start,
                                    ObjectSlot end) {
  for (ObjectSlot p = start; p < end; ++p) {
    Object* object = *p;
    if (!Heap::InNewSpace(object)) continue;
    scavenger_->ScavengeObject(HeapObjectSlot(p),
                               reinterpret_cast<HeapObject*>(object));
  }
}

void ScavengeVisitor::VisitPointers(HeapObject* host, MaybeObjectSlot start,
                                    MaybeObjectSlot end) {
  for (MaybeObjectSlot p = start; p < end; ++p) {
    MaybeObject object = *p;
    if (!Heap::InNewSpace(object)) continue;
    // Treat the weak reference as strong.
    HeapObject* heap_object;
    if (object->GetHeapObject(&heap_object)) {
      scavenger_->ScavengeObject(HeapObjectSlot(p), heap_object);
    } else {
      UNREACHABLE();
    }
  }
}

}  // namespace internal
}  // namespace v8

#endif  // V8_HEAP_SCAVENGER_INL_H_