5 #include "src/heap/concurrent-marking.h" 8 #include <unordered_map> 10 #include "include/v8config.h" 11 #include "src/base/template-utils.h" 12 #include "src/heap/gc-tracer.h" 13 #include "src/heap/heap-inl.h" 14 #include "src/heap/heap.h" 15 #include "src/heap/mark-compact-inl.h" 16 #include "src/heap/mark-compact.h" 17 #include "src/heap/marking.h" 18 #include "src/heap/objects-visiting-inl.h" 19 #include "src/heap/objects-visiting.h" 20 #include "src/heap/worklist.h" 21 #include "src/isolate.h" 22 #include "src/objects/hash-table-inl.h" 23 #include "src/objects/slots-inl.h" 24 #include "src/utils-inl.h" 25 #include "src/utils.h" 35 : live_bytes_(live_bytes) {}
  Bitmap* bitmap(const MemoryChunk* chunk) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*live_bytes_)[chunk] += by;
  }

 private:
  LiveBytesMap* live_bytes_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object* value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object* value) {
    snapshot_[number_of_slots_].first = slot;
    snapshot_[number_of_slots_].second = value;
    ++number_of_slots_;
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object*> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

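// Visitor used by the concurrent marking tasks. Objects are marked grey and
// pushed onto the shared worklist; objects that cannot be visited safely off
// the main thread (e.g. Code and Map) are pushed onto the bailout worklist
// instead.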
class ConcurrentMarkingVisitor final
    : public HeapVisitor<int, ConcurrentMarkingVisitor> {
 public:
  explicit ConcurrentMarkingVisitor(
      ConcurrentMarking::MarkingWorklist* shared,
      ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
      WeakObjects* weak_objects,
      ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      bool embedder_tracing_enabled)
      : shared_(shared, task_id),
        bailout_(bailout, task_id),
        weak_objects_(weak_objects),
        embedder_objects_(embedder_objects, task_id),
        marking_state_(live_bytes),
        task_id_(task_id),
        embedder_tracing_enabled_(embedder_tracing_enabled) {}

  template <typename T, typename = typename std::enable_if<
                            std::is_base_of<Object, T>::value>::type>
  static V8_INLINE T* Cast(HeapObject* object) {
    return T::cast(object);
  }

  template <typename T, typename = typename std::enable_if<
                            std::is_base_of<ObjectPtr, T>::value>::type>
  static V8_INLINE T Cast(HeapObject* object) {
    return T::cast(object);
  }

  bool ShouldVisit(HeapObject* object) {
    return marking_state_.GreyToBlack(object);
  }

  bool AllowDefaultJSObjectVisit() { return false; }

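  // Strong references are marked and recorded immediately. Weak references
  // are recorded if the target is already marked, otherwise they are
  // deferred to the weak_references worklist for the main thread.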
  void ProcessStrongHeapObject(HeapObject* host, ObjectSlot slot,
                               HeapObject* heap_object) {
    MarkObject(heap_object);
    MarkCompactCollector::RecordSlot(host, slot, heap_object);
  }

  void ProcessWeakHeapObject(HeapObject* host, HeapObjectSlot slot,
                             HeapObject* heap_object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race in
    // mark-bit initialization (see the corresponding release store in
    // MemoryChunk).
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.IsBlackOrGrey(heap_object)) {
      // Weak references with live values are treated as strong references.
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    } else {
      // Defer the weak reference until the liveness of the value is known.
      weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
    }
  }

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    for (ObjectSlot slot = start; slot < end; ++slot) {
      Object* object = slot.Relaxed_Load();
      DCHECK(!HasWeakHeapObjectTag(object));
      if (object->IsHeapObject()) {
        ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
      }
    }
  }

  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    for (MaybeObjectSlot slot = start; slot < end; ++slot) {
      MaybeObject object = slot.Relaxed_Load();
      HeapObject* heap_object;
      if (object->GetHeapObjectIfStrong(&heap_object)) {
        // If the reference changes concurrently from strong to weak, the write
        // barrier will treat the reference as strong, so we won't miss it.
        ProcessStrongHeapObject(host, ObjectSlot(slot), heap_object);
      } else if (object->GetHeapObjectIfWeak(&heap_object)) {
        ProcessWeakHeapObject(host, HeapObjectSlot(slot), heap_object);
      }
    }
  }

  void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object* object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object->IsHeapObject()) continue;
      HeapObject* heap_object = HeapObject::cast(object);
      MarkObject(heap_object);
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    }
  }

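  // JS objects can be mutated concurrently by the main thread, so their slots
  // are first copied into a SlotSnapshot and then marked from the snapshot
  // (see VisitWithSnapshot below).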
  int VisitJSObject(Map map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitWasmInstanceObject(Map map, WasmInstanceObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSWeakCell(Map map, JSWeakCell* weak_cell) {
    int size = VisitJSObjectSubclass(map, weak_cell);
    if (size == 0) return 0;

    if (weak_cell->target()->IsHeapObject()) {
      HeapObject* target = HeapObject::cast(weak_cell->target());
      if (marking_state_.IsBlackOrGrey(target)) {
        // Record the slot inside the JSWeakCell, since the IterateBody above
        // didn't visit it.
        ObjectSlot slot =
            HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
        MarkCompactCollector::RecordSlot(weak_cell, slot, target);
      } else {
        // The JSWeakCell points to a potentially dead object; process it once
        // the liveness of the whole transitive closure is known.
        weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
      }
    }
    return size;
  }

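  // API wrappers are additionally pushed onto the embedder worklist when
  // embedder tracing is enabled (see VisitEmbedderTracingSubclass).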
  int VisitJSApiObject(Map map, JSObject* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSArrayBuffer(Map map, JSArrayBuffer* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSDataView(Map map, JSDataView* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSTypedArray(Map map, JSTypedArray* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitConsString(Map map, ConsString object) {
    int size = ConsString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    int size = SlicedString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

  int VisitThinString(Map map, ThinString object) {
    int size = ThinString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

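  // Sequential strings carry no pointers besides the map, so only the map
  // slot is visited.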
  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    int size = SeqOneByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    int size = SeqTwoByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  int VisitFixedArray(Map map, FixedArray object) {
    return VisitLeftTrimmableArray(map, object);
  }

  int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
    return VisitLeftTrimmableArray(map, object);
  }

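  // Code objects are not marked concurrently; they are handed over to the
  // main thread via the bailout worklist.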
  int VisitCode(Map map, Code object) {
    bailout_.Push(object);
    return 0;
  }

  int VisitBytecodeArray(Map map, BytecodeArray object) {
    if (!ShouldVisit(object)) return 0;
    int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  int VisitMap(Map meta_map, Map map) {
    if (marking_state_.IsGrey(map)) {
      // Maps have ad-hoc weakness for descriptor arrays. Conservatively visit
      // only the strong fields, skipping the descriptor array field, and bail
      // out to the main thread for the rest.
      VisitMapPointer(map, map->map_slot());
      VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
      VisitPointer(
          map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
      VisitPointer(map, HeapObject::RawMaybeWeakField(
                            map, Map::kTransitionsOrPrototypeInfoOffset));
      VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
      bailout_.Push(map);
    }
    return 0;
  }

  int VisitTransitionArray(Map map, TransitionArray* array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
    TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
    weak_objects_->transition_arrays.Push(task_id_, array);
    return size;
  }

  int VisitJSWeakCollection(Map map, JSWeakCollection* object) {
    return VisitJSObjectSubclass(map, object);
  }

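  // Ephemeron (weak hash table) entries: the value is visited only if the key
  // is already marked; otherwise the candidate ephemeron is recorded for
  // later processing.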
  int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
    if (!ShouldVisit(table)) return 0;
    weak_objects_->ephemeron_hash_tables.Push(task_id_, table);

    for (int i = 0; i < table->Capacity(); i++) {
      ObjectSlot key_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
      HeapObject* key = HeapObject::cast(table->KeyAt(i));
      MarkCompactCollector::RecordSlot(table, key_slot, key);

      ObjectSlot value_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));

      if (marking_state_.IsBlackOrGrey(key)) {
        VisitPointer(table, value_slot);
      } else {
        Object* value_obj = table->ValueAt(i);

        if (value_obj->IsHeapObject()) {
          HeapObject* value = HeapObject::cast(value_obj);
          MarkCompactCollector::RecordSlot(table, value_slot, value);

          // Revisit ephemerons with both key and value unreachable at the end
          // of the concurrent marking cycle.
          if (marking_state_.IsWhite(value)) {
            weak_objects_->discovered_ephemerons.Push(task_id_,
                                                      Ephemeron{key, value});
          }
        }
      }
    }

    return table->SizeFromMap(map);
  }

  // Implements ephemeron semantics: marks the value if the key is already
  // reachable. Returns true if the value was actually marked.
  bool VisitEphemeron(HeapObject* key, HeapObject* value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        shared_.Push(value);
        return true;
      }
    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }
    return false;
  }

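  // Marks a white object grey and pushes it onto the shared worklist.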
  void MarkObject(HeapObject* object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race in
    // mark-bit initialization (see the corresponding release store in
    // MemoryChunk).
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject* host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object* object = p.Relaxed_Load();
        slot_snapshot_->add(p, object);
      }
    }

    void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host->IsJSWeakCell());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclass(Map map, T* object) {
    int size = T::BodyDescriptor::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, T::kHeaderSize);
    return VisitWithSnapshot(map, object, used_size, size);
  }

  template <typename T>
  int VisitEmbedderTracingSubclass(Map map, T* object) {
    DCHECK(object->IsApiWrapper());
    int size = VisitJSObjectSubclass(map, object);
    if (size && embedder_tracing_enabled_) {
      // The object needs to be processed by the embedder.
      embedder_objects_.Push(object);
    }
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The length is loaded unchecked because the array may be concurrently
    // left-trimmed by the main thread.
    Object* length = object->unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length->IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object, object->map_slot());
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

  template <typename T>
  int VisitWithSnapshot(Map map, T object, int used_size, int size) {
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
    std::remove_pointer<T>::type::BodyDescriptor::IterateBody(map, object, size,
                                                              &visitor);
    return slot_snapshot_;
  }

  ConcurrentMarking::MarkingWorklist::View shared_;
  ConcurrentMarking::MarkingWorklist::View bailout_;
  WeakObjects* weak_objects_;
  ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
  ConcurrentMarkingState marking_state_;
  int task_id_;
  SlotSnapshot slot_snapshot_;
  bool embedder_tracing_enabled_;
};

// Strings can change maps due to conversion to thin string or external
// strings. Use unchecked casts to avoid data races in slow dchecks.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// A FixedArray can become a free space object during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return FixedArray::unchecked_cast(object);
}

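// A cancelable background task; each task runs one concurrent marking
// iteration for its task id.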
class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  ~Task() override = default;

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* bailout,
                                     MarkingWorklist* on_hold,
                                     WeakObjects* weak_objects,
                                     EmbedderTracingWorklist* embedder_objects)
    : heap_(heap),
      shared_(shared),
      bailout_(bailout),
      on_hold_(on_hold),
      weak_objects_(weak_objects),
      embedder_objects_(embedder_objects) {
// The runtime flag should only be set if the compile-time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}

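// Drains the shared marking worklist for the given task, checking for
// preemption roughly every 64KB of marked object size or every 1000 objects,
// and processes ephemerons before and after the main loop.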
void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  ConcurrentMarkingVisitor visitor(
      shared_, bailout_, &task_state->live_bytes, weak_objects_,
      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
  double time_ms;
  size_t marked_bytes = 0;
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Starting concurrent marking task %d\n", task_id);
  }
  bool ephemeron_marked = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;
      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject* object;
        if (!shared_->Pop(task_id, &object)) {
          done = true;
          break;
        }
        objects_processed++;
        // The order of the two loads is important.
        Address new_space_top = heap_->new_space()->original_top_acquire();
        Address new_space_limit = heap_->new_space()->original_limit_relaxed();
        Address addr = object->address();
        if (new_space_top <= addr && addr < new_space_limit) {
          on_hold_->Push(task_id, object);
        } else {
          Map map = object->synchronized_map();
          current_marked_bytes += visitor.Visit(map, object);
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;
      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    shared_->FlushToGlobal(task_id);
    bailout_->FlushToGlobal(task_id);
    on_hold_->FlushToGlobal(task_id);
    embedder_objects_->FlushToGlobal(task_id);

    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_cells.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }

    {
      base::MutexGuard guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

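// Schedules one background Task per marking task id (up to kMaxTasks).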
void ConcurrentMarking::ScheduleTasks() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  base::MutexGuard guard(&pending_lock_);
  DCHECK_EQ(0, pending_task_count_);
  if (task_count_ == 0) {
    static const int num_cores =
        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
    // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
    // marking on competing hyper-threads (regresses Octane/Splay), so only
    // use half of the cores, leaving one of those for the main thread.
    task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else   // defined(V8_OS_MACOSX)
    // On other platforms use all logical cores, leaving one for the main
    // thread.
    task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif  // defined(V8_OS_MACOSX)
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request = false;
      is_pending_[i] = true;
      ++pending_task_count_;
      auto task =
          base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    }
  }
  DCHECK_EQ(task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;
  {
    base::MutexGuard guard(&pending_lock_);
    if (pending_task_count_ > 0) return;
  }
  if (!shared_->IsGlobalPoolEmpty() ||
      !weak_objects_->current_ephemerons.IsEmpty() ||
      !weak_objects_->discovered_ephemerons.IsEmpty()) {
    ScheduleTasks();
  }
}

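// Returns false if no tasks were pending. Otherwise aborts pending tasks (or
// requests preemption, depending on stop_request) and waits until all of them
// have finished.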
bool ConcurrentMarking::Stop(StopRequest stop_request) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  base::MutexGuard guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            TryAbortResult::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request = true;
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  base::MutexGuard guard(&pending_lock_);
  return pending_task_count_ == 0;
}

void ConcurrentMarking::FlushLiveBytes(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= task_count_; i++) {
    LiveBytesMap& live_bytes = task_state_[i].live_bytes;
    for (auto pair : live_bytes) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      if (pair.second != 0) {
        marking_state->IncrementLiveBytes(pair.first, pair.second);
      }
    }
    live_bytes.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
  for (int i = 1; i <= task_count_; i++) {
    if (task_state_[i].live_bytes.count(chunk)) {
      task_state_[i].live_bytes[chunk] = 0;
    }
  }
}

size_t ConcurrentMarking::TotalMarkedBytes() {
  size_t result = 0;
  for (int i = 1; i <= task_count_; i++) {
    result +=
        base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
  }
  result += total_marked_bytes_;
  return result;
}

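// PauseScope preempts running marking tasks on construction and reschedules
// them on destruction (when concurrent marking is enabled).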
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking &&
                      concurrent_marking_->Stop(
                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8