V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
concurrent-marking.cc
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/concurrent-marking.h"

#include <stack>
#include <unordered_map>

#include "include/v8config.h"
#include "src/base/template-utils.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
#include "src/heap/marking.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/heap/worklist.h"
#include "src/isolate.h"
#include "src/objects/hash-table-inl.h"
#include "src/objects/slots-inl.h"
#include "src/utils-inl.h"
#include "src/utils.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

class ConcurrentMarkingState final
    : public MarkingStateBase<ConcurrentMarkingState, AccessMode::ATOMIC> {
 public:
  explicit ConcurrentMarkingState(LiveBytesMap* live_bytes)
      : live_bytes_(live_bytes) {}

  Bitmap* bitmap(const MemoryChunk* chunk) {
    DCHECK_EQ(reinterpret_cast<intptr_t>(&chunk->marking_bitmap_) -
                  reinterpret_cast<intptr_t>(chunk),
              MemoryChunk::kMarkBitmapOffset);
    return chunk->marking_bitmap_;
  }

  void IncrementLiveBytes(MemoryChunk* chunk, intptr_t by) {
    (*live_bytes_)[chunk] += by;
  }

  // The live_bytes and SetLiveBytes methods of the marking state are
  // not used by the concurrent marker.

 private:
  LiveBytesMap* live_bytes_;
};

// Helper class for storing in-object slot addresses and values.
class SlotSnapshot {
 public:
  SlotSnapshot() : number_of_slots_(0) {}
  int number_of_slots() const { return number_of_slots_; }
  ObjectSlot slot(int i) const { return snapshot_[i].first; }
  Object* value(int i) const { return snapshot_[i].second; }
  void clear() { number_of_slots_ = 0; }
  void add(ObjectSlot slot, Object* value) {
    snapshot_[number_of_slots_].first = slot;
    snapshot_[number_of_slots_].second = value;
    ++number_of_slots_;
  }

 private:
  static const int kMaxSnapshotSize = JSObject::kMaxInstanceSize / kPointerSize;
  int number_of_slots_;
  std::pair<ObjectSlot, Object*> snapshot_[kMaxSnapshotSize];
  DISALLOW_COPY_AND_ASSIGN(SlotSnapshot);
};

class ConcurrentMarkingVisitor final
    : public HeapVisitor<int, ConcurrentMarkingVisitor> {
 public:
  explicit ConcurrentMarkingVisitor(
      ConcurrentMarking::MarkingWorklist* shared,
      ConcurrentMarking::MarkingWorklist* bailout, LiveBytesMap* live_bytes,
      WeakObjects* weak_objects,
      ConcurrentMarking::EmbedderTracingWorklist* embedder_objects, int task_id,
      bool embedder_tracing_enabled)
      : shared_(shared, task_id),
        bailout_(bailout, task_id),
        weak_objects_(weak_objects),
        embedder_objects_(embedder_objects, task_id),
        marking_state_(live_bytes),
        task_id_(task_id),
        embedder_tracing_enabled_(embedder_tracing_enabled) {}

  template <typename T, typename = typename std::enable_if<
                            std::is_base_of<Object, T>::value>::type>
  static V8_INLINE T* Cast(HeapObject* object) {
    return T::cast(object);
  }

  template <typename T, typename = typename std::enable_if<
                            std::is_base_of<ObjectPtr, T>::value>::type>
  static V8_INLINE T Cast(HeapObject* object) {
    return T::cast(object);
  }

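  // Claims the object for this marking task by atomically transitioning it
  // from grey to black; returns false if the object is already black (some
  // other task or the main thread got there first), so each object's body is
  // visited at most once.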
  bool ShouldVisit(HeapObject* object) {
    return marking_state_.GreyToBlack(object);
  }

  bool AllowDefaultJSObjectVisit() { return false; }

  void ProcessStrongHeapObject(HeapObject* host, ObjectSlot slot,
                               HeapObject* heap_object) {
    MarkObject(heap_object);
    MarkCompactCollector::RecordSlot(host, slot, heap_object);
  }

  void ProcessWeakHeapObject(HeapObject* host, HeapObjectSlot slot,
                             HeapObject* heap_object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(heap_object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.IsBlackOrGrey(heap_object)) {
      // Weak references with live values are directly processed here to
      // reduce the processing time of weak cells during the main GC
      // pause.
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    } else {
      // If we do not know about liveness of the value, we have to process
      // the reference when we know the liveness of the whole transitive
      // closure.
      weak_objects_->weak_references.Push(task_id_, std::make_pair(host, slot));
    }
  }

  void VisitPointers(HeapObject* host, ObjectSlot start,
                     ObjectSlot end) override {
    for (ObjectSlot slot = start; slot < end; ++slot) {
      Object* object = slot.Relaxed_Load();
      DCHECK(!HasWeakHeapObjectTag(object));
      if (object->IsHeapObject()) {
        ProcessStrongHeapObject(host, slot, HeapObject::cast(object));
      }
    }
  }

  void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                     MaybeObjectSlot end) override {
    for (MaybeObjectSlot slot = start; slot < end; ++slot) {
      MaybeObject object = slot.Relaxed_Load();
      HeapObject* heap_object;
      if (object->GetHeapObjectIfStrong(&heap_object)) {
        // If the reference changes concurrently from strong to weak, the write
        // barrier will treat the weak reference as strong, so we won't miss
        // the weak reference.
        ProcessStrongHeapObject(host, ObjectSlot(slot), heap_object);
      } else if (object->GetHeapObjectIfWeak(&heap_object)) {
        ProcessWeakHeapObject(host, HeapObjectSlot(slot), heap_object);
      }
    }
  }

  // Weak list pointers should be ignored during marking. The lists are
  // reconstructed after GC.
  void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
                               ObjectSlot end) override {}

  void VisitPointersInSnapshot(HeapObject* host, const SlotSnapshot& snapshot) {
    for (int i = 0; i < snapshot.number_of_slots(); i++) {
      ObjectSlot slot = snapshot.slot(i);
      Object* object = snapshot.value(i);
      DCHECK(!HasWeakHeapObjectTag(object));
      if (!object->IsHeapObject()) continue;
      HeapObject* heap_object = HeapObject::cast(object);
      MarkObject(heap_object);
      MarkCompactCollector::RecordSlot(host, slot, heap_object);
    }
  }

  // ===========================================================================
  // JS object =================================================================
  // ===========================================================================

  int VisitJSObject(Map map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSObjectFast(Map map, JSObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitWasmInstanceObject(Map map, WasmInstanceObject* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitJSWeakCell(Map map, JSWeakCell* weak_cell) {
    int size = VisitJSObjectSubclass(map, weak_cell);
    if (size == 0) {
      return 0;
    }

    if (weak_cell->target()->IsHeapObject()) {
      HeapObject* target = HeapObject::cast(weak_cell->target());
      if (marking_state_.IsBlackOrGrey(target)) {
        // Record the slot inside the JSWeakCell, since the
        // VisitJSObjectSubclass above didn't visit it.
        ObjectSlot slot =
            HeapObject::RawField(weak_cell, JSWeakCell::kTargetOffset);
        MarkCompactCollector::RecordSlot(weak_cell, slot, target);
      } else {
        // The JSWeakCell points to a potentially dead object. We have to
        // process it when we know the liveness of the whole transitive
        // closure.
        weak_objects_->js_weak_cells.Push(task_id_, weak_cell);
      }
    }
    return size;
  }

  // Some JS objects can carry back links to embedders that contain information
  // relevant to the garbage collectors.

  int VisitJSApiObject(Map map, JSObject* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSArrayBuffer(Map map, JSArrayBuffer* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSDataView(Map map, JSDataView* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  int VisitJSTypedArray(Map map, JSTypedArray* object) {
    return VisitEmbedderTracingSubclass(map, object);
  }

  // ===========================================================================
  // Strings with pointers =====================================================
  // ===========================================================================

  int VisitConsString(Map map, ConsString object) {
    int size = ConsString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

  int VisitSlicedString(Map map, SlicedString object) {
    int size = SlicedString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

  int VisitThinString(Map map, ThinString object) {
    int size = ThinString::BodyDescriptor::SizeOf(map, object);
    return VisitWithSnapshot(map, object, size, size);
  }

  // ===========================================================================
  // Strings without pointers ==================================================
  // ===========================================================================

  int VisitSeqOneByteString(Map map, SeqOneByteString object) {
    int size = SeqOneByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  int VisitSeqTwoByteString(Map map, SeqTwoByteString object) {
    int size = SeqTwoByteString::SizeFor(object->synchronized_length());
    if (!ShouldVisit(object)) return 0;
    VisitMapPointer(object, object->map_slot());
    return size;
  }

  // ===========================================================================
  // Fixed array object ========================================================
  // ===========================================================================

  int VisitFixedArray(Map map, FixedArray object) {
    return VisitLeftTrimmableArray(map, object);
  }

  int VisitFixedDoubleArray(Map map, FixedDoubleArray object) {
    return VisitLeftTrimmableArray(map, object);
  }

  // ===========================================================================
  // Code object ===============================================================
  // ===========================================================================

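  // Code objects are not visited by the concurrent marker; they are pushed
  // onto the bailout worklist and processed on the main thread instead.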
  int VisitCode(Map map, Code object) {
    bailout_.Push(object);
    return 0;
  }

  // ===========================================================================
  // Side-effectful visitation.
  // ===========================================================================

  int VisitBytecodeArray(Map map, BytecodeArray object) {
    if (!ShouldVisit(object)) return 0;
    int size = BytecodeArray::BodyDescriptor::SizeOf(map, object);
    VisitMapPointer(object, object->map_slot());
    BytecodeArray::BodyDescriptor::IterateBody(map, object, size, this);
    object->MakeOlder();
    return size;
  }

  int VisitMap(Map meta_map, Map map) {
    if (marking_state_.IsGrey(map)) {
      // Maps have ad-hoc weakness for descriptor arrays. They also clear the
      // code-cache. Conservatively visit strong fields skipping the
      // descriptor array field and the code cache field.
      VisitMapPointer(map, map->map_slot());
      VisitPointer(map, HeapObject::RawField(map, Map::kPrototypeOffset));
      VisitPointer(
          map, HeapObject::RawField(map, Map::kConstructorOrBackPointerOffset));
      VisitPointer(map, HeapObject::RawMaybeWeakField(
                            map, Map::kTransitionsOrPrototypeInfoOffset));
      VisitPointer(map, HeapObject::RawField(map, Map::kDependentCodeOffset));
      bailout_.Push(map);
    }
    return 0;
  }

  int VisitTransitionArray(Map map, TransitionArray* array) {
    if (!ShouldVisit(array)) return 0;
    VisitMapPointer(array, array->map_slot());
    int size = TransitionArray::BodyDescriptor::SizeOf(map, array);
    TransitionArray::BodyDescriptor::IterateBody(map, array, size, this);
    weak_objects_->transition_arrays.Push(task_id_, array);
    return size;
  }

  int VisitJSWeakCollection(Map map, JSWeakCollection* object) {
    return VisitJSObjectSubclass(map, object);
  }

  int VisitEphemeronHashTable(Map map, EphemeronHashTable table) {
    if (!ShouldVisit(table)) return 0;
    weak_objects_->ephemeron_hash_tables.Push(task_id_, table);

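    // For each entry, record the key slot and visit the value only if the key
    // is already marked. Otherwise just record the value slot and, if the
    // value is also unmarked, remember the pair so ephemeron semantics can be
    // re-checked at the end of the concurrent marking cycle.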
    for (int i = 0; i < table->Capacity(); i++) {
      ObjectSlot key_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToIndex(i));
      HeapObject* key = HeapObject::cast(table->KeyAt(i));
      MarkCompactCollector::RecordSlot(table, key_slot, key);

      ObjectSlot value_slot =
          table->RawFieldOfElementAt(EphemeronHashTable::EntryToValueIndex(i));

      if (marking_state_.IsBlackOrGrey(key)) {
        VisitPointer(table, value_slot);
      } else {
        Object* value_obj = table->ValueAt(i);

        if (value_obj->IsHeapObject()) {
          HeapObject* value = HeapObject::cast(value_obj);
          MarkCompactCollector::RecordSlot(table, value_slot, value);

          // Revisit ephemerons with both key and value unreachable at end
          // of concurrent marking cycle.
          if (marking_state_.IsWhite(value)) {
            weak_objects_->discovered_ephemerons.Push(task_id_,
                                                      Ephemeron{key, value});
          }
        }
      }
    }

    return table->SizeFromMap(map);
  }

  // Implements ephemeron semantics: Marks value if key is already reachable.
  // Returns true if value was actually marked.
  bool VisitEphemeron(HeapObject* key, HeapObject* value) {
    if (marking_state_.IsBlackOrGrey(key)) {
      if (marking_state_.WhiteToGrey(value)) {
        shared_.Push(value);
        return true;
      }
    } else if (marking_state_.IsWhite(value)) {
      weak_objects_->next_ephemerons.Push(task_id_, Ephemeron{key, value});
    }

    return false;
  }

  void MarkObject(HeapObject* object) {
#ifdef THREAD_SANITIZER
    // Perform a dummy acquire load to tell TSAN that there is no data race
    // in mark-bit initialization. See MemoryChunk::Initialize for the
    // corresponding release store.
    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
    CHECK_NOT_NULL(chunk->synchronized_heap());
#endif
    if (marking_state_.WhiteToGrey(object)) {
      shared_.Push(object);
    }
  }

 private:
  // Helper class for collecting in-object slot addresses and values.
  class SlotSnapshottingVisitor final : public ObjectVisitor {
   public:
    explicit SlotSnapshottingVisitor(SlotSnapshot* slot_snapshot)
        : slot_snapshot_(slot_snapshot) {
      slot_snapshot_->clear();
    }

    void VisitPointers(HeapObject* host, ObjectSlot start,
                       ObjectSlot end) override {
      for (ObjectSlot p = start; p < end; ++p) {
        Object* object = p.Relaxed_Load();
        slot_snapshot_->add(p, object);
      }
    }

    void VisitPointers(HeapObject* host, MaybeObjectSlot start,
                       MaybeObjectSlot end) override {
      // This should never happen, because we don't use snapshotting for
      // objects which contain weak references.
      UNREACHABLE();
    }

    void VisitCustomWeakPointers(HeapObject* host, ObjectSlot start,
                                 ObjectSlot end) override {
      DCHECK(host->IsJSWeakCell());
    }

   private:
    SlotSnapshot* slot_snapshot_;
  };

  template <typename T>
  int VisitJSObjectSubclass(Map map, T* object) {
    int size = T::BodyDescriptor::SizeOf(map, object);
    int used_size = map->UsedInstanceSize();
    DCHECK_LE(used_size, size);
    DCHECK_GE(used_size, T::kHeaderSize);
    return VisitWithSnapshot(map, object, used_size, size);
  }

  template <typename T>
  int VisitEmbedderTracingSubclass(Map map, T* object) {
    DCHECK(object->IsApiWrapper());
    int size = VisitJSObjectSubclass(map, object);
    if (size && embedder_tracing_enabled_) {
      // Success: The object needs to be processed for embedder references on
      // the main thread.
      embedder_objects_.Push(object);
    }
    return size;
  }

  template <typename T>
  int VisitLeftTrimmableArray(Map map, T object) {
    // The synchronized_length() function checks that the length is a Smi.
    // This is not necessarily the case if the array is being left-trimmed.
    Object* length = object->unchecked_synchronized_length();
    if (!ShouldVisit(object)) return 0;
    // The cached length must be the actual length as the array is not black.
    // Left trimming marks the array black before over-writing the length.
    DCHECK(length->IsSmi());
    int size = T::SizeFor(Smi::ToInt(length));
    VisitMapPointer(object, object->map_slot());
    T::BodyDescriptor::IterateBody(map, object, size, this);
    return size;
  }

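  // JS objects may be mutated by the main thread while marking runs, so their
  // slot values are first copied into a snapshot with relaxed loads and the
  // object is only then claimed (grey -> black); the recorded values are
  // marked afterwards. Updates that race with the snapshot are expected to be
  // caught by the marking write barrier.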
  template <typename T>
  int VisitWithSnapshot(Map map, T object, int used_size, int size) {
    const SlotSnapshot& snapshot = MakeSlotSnapshot(map, object, used_size);
    if (!ShouldVisit(object)) return 0;
    VisitPointersInSnapshot(object, snapshot);
    return size;
  }

  template <typename T>
  const SlotSnapshot& MakeSlotSnapshot(Map map, T object, int size) {
    SlotSnapshottingVisitor visitor(&slot_snapshot_);
    visitor.VisitPointer(object, ObjectSlot(object->map_slot().address()));
    // TODO(3770): Drop std::remove_pointer after the migration.
    std::remove_pointer<T>::type::BodyDescriptor::IterateBody(map, object, size,
                                                              &visitor);
    return slot_snapshot_;
  }

  ConcurrentMarking::MarkingWorklist::View shared_;
  ConcurrentMarking::MarkingWorklist::View bailout_;
  WeakObjects* weak_objects_;
  ConcurrentMarking::EmbedderTracingWorklist::View embedder_objects_;
  ConcurrentMarkingState marking_state_;
  int task_id_;
  SlotSnapshot slot_snapshot_;
  bool embedder_tracing_enabled_;
};

// Strings can change maps due to conversion to thin string or external strings.
// Use unchecked cast to avoid data race in slow dchecks.
template <>
ConsString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return ConsString::unchecked_cast(object);
}

template <>
SlicedString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SlicedString::unchecked_cast(object);
}

template <>
ThinString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return ThinString::unchecked_cast(object);
}

template <>
SeqOneByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SeqOneByteString::unchecked_cast(object);
}

template <>
SeqTwoByteString ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return SeqTwoByteString::unchecked_cast(object);
}

// Fixed array can become a free space during left trimming.
template <>
FixedArray ConcurrentMarkingVisitor::Cast(HeapObject* object) {
  return FixedArray::unchecked_cast(object);
}

class ConcurrentMarking::Task : public CancelableTask {
 public:
  Task(Isolate* isolate, ConcurrentMarking* concurrent_marking,
       TaskState* task_state, int task_id)
      : CancelableTask(isolate),
        concurrent_marking_(concurrent_marking),
        task_state_(task_state),
        task_id_(task_id) {}

  ~Task() override = default;

 private:
  // v8::internal::CancelableTask overrides.
  void RunInternal() override {
    concurrent_marking_->Run(task_id_, task_state_);
  }

  ConcurrentMarking* concurrent_marking_;
  TaskState* task_state_;
  int task_id_;
  DISALLOW_COPY_AND_ASSIGN(Task);
};

ConcurrentMarking::ConcurrentMarking(Heap* heap, MarkingWorklist* shared,
                                     MarkingWorklist* bailout,
                                     MarkingWorklist* on_hold,
                                     WeakObjects* weak_objects,
                                     EmbedderTracingWorklist* embedder_objects)
    : heap_(heap),
      shared_(shared),
      bailout_(bailout),
      on_hold_(on_hold),
      weak_objects_(weak_objects),
      embedder_objects_(embedder_objects) {
// The runtime flag should be set only if the compile time flag was set.
#ifndef V8_CONCURRENT_MARKING
  CHECK(!FLAG_concurrent_marking && !FLAG_parallel_marking);
#endif
}

void ConcurrentMarking::Run(int task_id, TaskState* task_state) {
  TRACE_BACKGROUND_GC(heap_->tracer(),
                      GCTracer::BackgroundScope::MC_BACKGROUND_MARKING);
  size_t kBytesUntilInterruptCheck = 64 * KB;
  int kObjectsUntilInterruptCheck = 1000;
  ConcurrentMarkingVisitor visitor(
      shared_, bailout_, &task_state->live_bytes, weak_objects_,
      embedder_objects_, task_id, heap_->local_embedder_heap_tracer()->InUse());
  double time_ms;
  size_t marked_bytes = 0;
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Starting concurrent marking task %d\n", task_id);
  }
  bool ephemeron_marked = false;

  {
    TimedScope scope(&time_ms);

    {
      Ephemeron ephemeron;

      while (weak_objects_->current_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    bool done = false;
    while (!done) {
      size_t current_marked_bytes = 0;
      int objects_processed = 0;
      while (current_marked_bytes < kBytesUntilInterruptCheck &&
             objects_processed < kObjectsUntilInterruptCheck) {
        HeapObject* object;
        if (!shared_->Pop(task_id, &object)) {
          done = true;
          break;
        }
        objects_processed++;
        // The order of the two loads is important.
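        // Objects inside the new space's current allocation area may still be
        // under initialization by the mutator; defer them to the on_hold
        // worklist to be processed later instead of visiting them here.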
        Address new_space_top = heap_->new_space()->original_top_acquire();
        Address new_space_limit = heap_->new_space()->original_limit_relaxed();
        Address addr = object->address();
        if (new_space_top <= addr && addr < new_space_limit) {
          on_hold_->Push(task_id, object);
        } else {
          Map map = object->synchronized_map();
          current_marked_bytes += visitor.Visit(map, object);
        }
      }
      marked_bytes += current_marked_bytes;
      base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes,
                                                marked_bytes);
      if (task_state->preemption_request) {
        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.gc"),
                     "ConcurrentMarking::Run Preempted");
        break;
      }
    }

    if (done) {
      Ephemeron ephemeron;

      while (weak_objects_->discovered_ephemerons.Pop(task_id, &ephemeron)) {
        if (visitor.VisitEphemeron(ephemeron.key, ephemeron.value)) {
          ephemeron_marked = true;
        }
      }
    }

    shared_->FlushToGlobal(task_id);
    bailout_->FlushToGlobal(task_id);
    on_hold_->FlushToGlobal(task_id);
    embedder_objects_->FlushToGlobal(task_id);

    weak_objects_->transition_arrays.FlushToGlobal(task_id);
    weak_objects_->ephemeron_hash_tables.FlushToGlobal(task_id);
    weak_objects_->current_ephemerons.FlushToGlobal(task_id);
    weak_objects_->next_ephemerons.FlushToGlobal(task_id);
    weak_objects_->discovered_ephemerons.FlushToGlobal(task_id);
    weak_objects_->weak_references.FlushToGlobal(task_id);
    weak_objects_->js_weak_cells.FlushToGlobal(task_id);
    base::AsAtomicWord::Relaxed_Store<size_t>(&task_state->marked_bytes, 0);
    total_marked_bytes_ += marked_bytes;

    if (ephemeron_marked) {
      set_ephemeron_marked(true);
    }

    {
      base::MutexGuard guard(&pending_lock_);
      is_pending_[task_id] = false;
      --pending_task_count_;
      pending_condition_.NotifyAll();
    }
  }
  if (FLAG_trace_concurrent_marking) {
    heap_->isolate()->PrintWithTimestamp(
        "Task %d concurrently marked %dKB in %.2fms\n", task_id,
        static_cast<int>(marked_bytes / KB), time_ms);
  }
}

void ConcurrentMarking::ScheduleTasks() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  DCHECK(!heap_->IsTearingDown());
  base::MutexGuard guard(&pending_lock_);
  DCHECK_EQ(0, pending_task_count_);
  if (task_count_ == 0) {
    static const int num_cores =
        V8::GetCurrentPlatform()->NumberOfWorkerThreads() + 1;
#if defined(V8_OS_MACOSX)
    // Mac OSX 10.11 and prior seems to have trouble when doing concurrent
    // marking on competing hyper-threads (regresses Octane/Splay). As such,
    // only use num_cores/2, leaving one of those for the main thread.
    // TODO(ulan): Use all cores on Mac 10.12+.
    task_count_ = Max(1, Min(kMaxTasks, (num_cores / 2) - 1));
#else   // defined(V8_OS_MACOSX)
    // On other platforms use all logical cores, leaving one for the main
    // thread.
    task_count_ = Max(1, Min(kMaxTasks, num_cores - 1));
#endif  // defined(V8_OS_MACOSX)
  }
  // Task id 0 is for the main thread.
  for (int i = 1; i <= task_count_; i++) {
    if (!is_pending_[i]) {
      if (FLAG_trace_concurrent_marking) {
        heap_->isolate()->PrintWithTimestamp(
            "Scheduling concurrent marking task %d\n", i);
      }
      task_state_[i].preemption_request = false;
      is_pending_[i] = true;
      ++pending_task_count_;
      auto task =
          base::make_unique<Task>(heap_->isolate(), this, &task_state_[i], i);
      cancelable_id_[i] = task->id();
      V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
    }
  }
  DCHECK_EQ(task_count_, pending_task_count_);
}

void ConcurrentMarking::RescheduleTasksIfNeeded() {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  if (heap_->IsTearingDown()) return;
  {
    base::MutexGuard guard(&pending_lock_);
    if (pending_task_count_ > 0) return;
  }
  if (!shared_->IsGlobalPoolEmpty() ||
      !weak_objects_->current_ephemerons.IsEmpty() ||
      !weak_objects_->discovered_ephemerons.IsEmpty()) {
    ScheduleTasks();
  }
}

bool ConcurrentMarking::Stop(StopRequest stop_request) {
  DCHECK(FLAG_parallel_marking || FLAG_concurrent_marking);
  base::MutexGuard guard(&pending_lock_);

  if (pending_task_count_ == 0) return false;

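  // Unless tasks must run to completion for testing, first try to abort tasks
  // that have not started yet; tasks that are already running are either asked
  // to preempt (PREEMPT_TASKS) or simply allowed to finish their current work.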
  if (stop_request != StopRequest::COMPLETE_TASKS_FOR_TESTING) {
    CancelableTaskManager* task_manager =
        heap_->isolate()->cancelable_task_manager();
    for (int i = 1; i <= task_count_; i++) {
      if (is_pending_[i]) {
        if (task_manager->TryAbort(cancelable_id_[i]) ==
            TryAbortResult::kTaskAborted) {
          is_pending_[i] = false;
          --pending_task_count_;
        } else if (stop_request == StopRequest::PREEMPT_TASKS) {
          task_state_[i].preemption_request = true;
        }
      }
    }
  }
  while (pending_task_count_ > 0) {
    pending_condition_.Wait(&pending_lock_);
  }
  for (int i = 1; i <= task_count_; i++) {
    DCHECK(!is_pending_[i]);
  }
  return true;
}

bool ConcurrentMarking::IsStopped() {
  if (!FLAG_concurrent_marking) return true;

  base::MutexGuard guard(&pending_lock_);
  return pending_task_count_ == 0;
}

void ConcurrentMarking::FlushLiveBytes(
    MajorNonAtomicMarkingState* marking_state) {
  DCHECK_EQ(pending_task_count_, 0);
  for (int i = 1; i <= task_count_; i++) {
    LiveBytesMap& live_bytes = task_state_[i].live_bytes;
    for (auto pair : live_bytes) {
      // ClearLiveness sets the live bytes to zero.
      // Pages with zero live bytes might be already unmapped.
      if (pair.second != 0) {
        marking_state->IncrementLiveBytes(pair.first, pair.second);
      }
    }
    live_bytes.clear();
    task_state_[i].marked_bytes = 0;
  }
  total_marked_bytes_ = 0;
}

void ConcurrentMarking::ClearLiveness(MemoryChunk* chunk) {
  for (int i = 1; i <= task_count_; i++) {
    if (task_state_[i].live_bytes.count(chunk)) {
      task_state_[i].live_bytes[chunk] = 0;
    }
  }
}

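// Sums the bytes already flushed by finished tasks with relaxed reads of the
// per-task counters of tasks that are still running, so the result may lag
// slightly behind the true total.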
799 
800 size_t ConcurrentMarking::TotalMarkedBytes() {
801  size_t result = 0;
802  for (int i = 1; i <= task_count_; i++) {
803  result +=
804  base::AsAtomicWord::Relaxed_Load<size_t>(&task_state_[i].marked_bytes);
805  }
806  result += total_marked_bytes_;
807  return result;
808 }
809 
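// Preempts any running marking tasks for the lifetime of the scope and
// reschedules them on destruction, but only if tasks were actually stopped.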
ConcurrentMarking::PauseScope::PauseScope(ConcurrentMarking* concurrent_marking)
    : concurrent_marking_(concurrent_marking),
      resume_on_exit_(FLAG_concurrent_marking &&
                      concurrent_marking_->Stop(
                          ConcurrentMarking::StopRequest::PREEMPT_TASKS)) {
  DCHECK_IMPLIES(resume_on_exit_, FLAG_concurrent_marking);
}

ConcurrentMarking::PauseScope::~PauseScope() {
  if (resume_on_exit_) concurrent_marking_->RescheduleTasksIfNeeded();
}

}  // namespace internal
}  // namespace v8