V8 API Reference, 7.2.502.16 (for Deno 0.2.4)
sweeper.cc
1 // Copyright 2017 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4 
5 #include "src/heap/sweeper.h"
6 
7 #include "src/base/template-utils.h"
8 #include "src/heap/array-buffer-tracker-inl.h"
9 #include "src/heap/gc-tracer.h"
10 #include "src/heap/mark-compact-inl.h"
11 #include "src/heap/remembered-set.h"
12 #include "src/objects-inl.h"
13 #include "src/vm-state-inl.h"
14 
15 namespace v8 {
16 namespace internal {
17 
18 Sweeper::Sweeper(Heap* heap, MajorNonAtomicMarkingState* marking_state)
19  : heap_(heap),
20  marking_state_(marking_state),
21  num_tasks_(0),
22  pending_sweeper_tasks_semaphore_(0),
23  incremental_sweeper_pending_(false),
24  sweeping_in_progress_(false),
25  num_sweeping_tasks_(0),
26  stop_sweeper_tasks_(false),
27  iterability_task_semaphore_(0),
28  iterability_in_progress_(false),
29  iterability_task_started_(false),
30  should_reduce_memory_(false) {}
31 
32 Sweeper::PauseOrCompleteScope::PauseOrCompleteScope(Sweeper* sweeper)
33  : sweeper_(sweeper) {
34  sweeper_->stop_sweeper_tasks_ = true;
35  if (!sweeper_->sweeping_in_progress()) return;
36 
37  sweeper_->AbortAndWaitForTasks();
38 
39  // Complete sweeping if there's nothing more to do.
40  if (sweeper_->IsDoneSweeping()) {
41  sweeper_->heap_->mark_compact_collector()->EnsureSweepingCompleted();
42  DCHECK(!sweeper_->sweeping_in_progress());
43  } else {
44  // Unless sweeping is complete the flag still indicates that the sweeper
45  // is enabled. It just cannot use tasks anymore.
46  DCHECK(sweeper_->sweeping_in_progress());
47  }
48 }
49 
50 Sweeper::PauseOrCompleteScope::~PauseOrCompleteScope() {
51  sweeper_->stop_sweeper_tasks_ = false;
52  if (!sweeper_->sweeping_in_progress()) return;
53 
54  sweeper_->StartSweeperTasks();
55 }
56 
57 Sweeper::FilterSweepingPagesScope::FilterSweepingPagesScope(
58  Sweeper* sweeper, const PauseOrCompleteScope& pause_or_complete_scope)
59  : sweeper_(sweeper),
60  pause_or_complete_scope_(pause_or_complete_scope),
61  sweeping_in_progress_(sweeper_->sweeping_in_progress()) {
62  USE(pause_or_complete_scope_);
63  if (!sweeping_in_progress_) return;
64 
65  int old_space_index = GetSweepSpaceIndex(OLD_SPACE);
66  old_space_sweeping_list_ =
67  std::move(sweeper_->sweeping_list_[old_space_index]);
68  sweeper_->sweeping_list_[old_space_index].clear();
69 }
70 
71 Sweeper::FilterSweepingPagesScope::~FilterSweepingPagesScope() {
72  DCHECK_EQ(sweeping_in_progress_, sweeper_->sweeping_in_progress());
73  if (!sweeping_in_progress_) return;
74 
75  sweeper_->sweeping_list_[GetSweepSpaceIndex(OLD_SPACE)] =
76  std::move(old_space_sweeping_list_);
77  // old_space_sweeping_list_ does not need to be cleared as we don't use it.
78 }
79 
80 class Sweeper::SweeperTask final : public CancelableTask {
81  public:
82  SweeperTask(Isolate* isolate, Sweeper* sweeper,
83  base::Semaphore* pending_sweeper_tasks,
84  std::atomic<intptr_t>* num_sweeping_tasks,
85  AllocationSpace space_to_start)
86  : CancelableTask(isolate),
87  sweeper_(sweeper),
88  pending_sweeper_tasks_(pending_sweeper_tasks),
89  num_sweeping_tasks_(num_sweeping_tasks),
90  space_to_start_(space_to_start),
91  tracer_(isolate->heap()->tracer()) {}
92 
93  ~SweeperTask() override = default;
94 
95  private:
96  void RunInternal() final {
97  TRACE_BACKGROUND_GC(tracer_,
98  GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
99  DCHECK(IsValidSweepingSpace(space_to_start_));
100  const int offset = space_to_start_ - FIRST_GROWABLE_PAGED_SPACE;
101  for (int i = 0; i < kNumberOfSweepingSpaces; i++) {
102  const AllocationSpace space_id = static_cast<AllocationSpace>(
103  FIRST_GROWABLE_PAGED_SPACE +
104  ((i + offset) % kNumberOfSweepingSpaces));
105  // Do not sweep code space concurrently.
106  if (space_id == CODE_SPACE) continue;
107  DCHECK(IsValidSweepingSpace(space_id));
108  sweeper_->SweepSpaceFromTask(space_id);
109  }
110  (*num_sweeping_tasks_)--;
111  pending_sweeper_tasks_->Signal();
112  }
113 
114  Sweeper* const sweeper_;
115  base::Semaphore* const pending_sweeper_tasks_;
116  std::atomic<intptr_t>* const num_sweeping_tasks_;
117  AllocationSpace space_to_start_;
118  GCTracer* const tracer_;
119 
120  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
121 };
122 
123 class Sweeper::IncrementalSweeperTask final : public CancelableTask {
124  public:
125  IncrementalSweeperTask(Isolate* isolate, Sweeper* sweeper)
126  : CancelableTask(isolate), isolate_(isolate), sweeper_(sweeper) {}
127 
128  ~IncrementalSweeperTask() override = default;
129 
130  private:
131  void RunInternal() final {
132  VMState<GC> state(isolate_);
133  TRACE_EVENT_CALL_STATS_SCOPED(isolate_, "v8", "V8.Task");
134 
135  sweeper_->incremental_sweeper_pending_ = false;
136 
137  if (sweeper_->sweeping_in_progress()) {
138  if (!sweeper_->SweepSpaceIncrementallyFromTask(CODE_SPACE)) {
139  sweeper_->ScheduleIncrementalSweepingTask();
140  }
141  }
142  }
143 
144  Isolate* const isolate_;
145  Sweeper* const sweeper_;
146  DISALLOW_COPY_AND_ASSIGN(IncrementalSweeperTask);
147 };
148 
149 void Sweeper::StartSweeping() {
150  CHECK(!stop_sweeper_tasks_);
151  sweeping_in_progress_ = true;
152  iterability_in_progress_ = true;
153  should_reduce_memory_ = heap_->ShouldReduceMemory();
154  MajorNonAtomicMarkingState* marking_state =
155  heap_->mark_compact_collector()->non_atomic_marking_state();
156  ForAllSweepingSpaces([this, marking_state](AllocationSpace space) {
157  int space_index = GetSweepSpaceIndex(space);
158  std::sort(sweeping_list_[space_index].begin(),
159  sweeping_list_[space_index].end(),
160  [marking_state](Page* a, Page* b) {
161  return marking_state->live_bytes(a) <
162  marking_state->live_bytes(b);
163  });
164  });
165 }
166 
167 void Sweeper::StartSweeperTasks() {
168  DCHECK_EQ(0, num_tasks_);
169  DCHECK_EQ(0, num_sweeping_tasks_);
170  if (FLAG_concurrent_sweeping && sweeping_in_progress_ &&
171  !heap_->delay_sweeper_tasks_for_testing_) {
172  ForAllSweepingSpaces([this](AllocationSpace space) {
173  DCHECK(IsValidSweepingSpace(space));
174  num_sweeping_tasks_++;
175  auto task = base::make_unique<SweeperTask>(
176  heap_->isolate(), this, &pending_sweeper_tasks_semaphore_,
177  &num_sweeping_tasks_, space);
178  DCHECK_LT(num_tasks_, kMaxSweeperTasks);
179  task_ids_[num_tasks_++] = task->id();
180  V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
181  });
182  ScheduleIncrementalSweepingTask();
183  }
184 }
185 
186 void Sweeper::SweepOrWaitUntilSweepingCompleted(Page* page) {
187  if (!page->SweepingDone()) {
188  ParallelSweepPage(page, page->owner()->identity());
189  if (!page->SweepingDone()) {
190  // We were not able to sweep that page, i.e., a concurrent
191  // sweeper thread currently owns this page. Wait for the sweeper
192  // thread to be done with this page.
193  page->WaitUntilSweepingCompleted();
194  }
195  }
196 }
197 
198 Page* Sweeper::GetSweptPageSafe(PagedSpace* space) {
199  base::MutexGuard guard(&mutex_);
200  SweptList& list = swept_list_[GetSweepSpaceIndex(space->identity())];
201  if (!list.empty()) {
202  auto last_page = list.back();
203  list.pop_back();
204  return last_page;
205  }
206  return nullptr;
207 }
208 
209 void Sweeper::AbortAndWaitForTasks() {
210  if (!FLAG_concurrent_sweeping) return;
211 
212  for (int i = 0; i < num_tasks_; i++) {
213  if (heap_->isolate()->cancelable_task_manager()->TryAbort(task_ids_[i]) !=
214  TryAbortResult::kTaskAborted) {
215  pending_sweeper_tasks_semaphore_.Wait();
216  } else {
217  // Aborted case.
218  num_sweeping_tasks_--;
219  }
220  }
221  num_tasks_ = 0;
222  DCHECK_EQ(0, num_sweeping_tasks_);
223 }
224 
225 void Sweeper::EnsureCompleted() {
226  if (!sweeping_in_progress_) return;
227 
228  EnsureIterabilityCompleted();
229 
230  // If sweeping is not completed or not running at all, we try to complete it
231  // here.
232  ForAllSweepingSpaces(
233  [this](AllocationSpace space) { ParallelSweepSpace(space, 0); });
234 
235  AbortAndWaitForTasks();
236 
237  ForAllSweepingSpaces([this](AllocationSpace space) {
238  CHECK(sweeping_list_[GetSweepSpaceIndex(space)].empty());
239  });
240  sweeping_in_progress_ = false;
241 }
242 
243 bool Sweeper::AreSweeperTasksRunning() { return num_sweeping_tasks_ != 0; }
244 
245 int Sweeper::RawSweep(Page* p, FreeListRebuildingMode free_list_mode,
246  FreeSpaceTreatmentMode free_space_mode) {
247  Space* space = p->owner();
248  DCHECK_NOT_NULL(space);
249  DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
250  space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
251  DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
252 
253 // TODO(ulan): we don't have to clear typed old-to-old slots in code space
254  // because the concurrent marker doesn't mark code objects. This requires
255  // the write barrier for code objects to check the color of the code object.
256  bool non_empty_typed_slots = p->typed_slot_set<OLD_TO_NEW>() != nullptr ||
257  p->typed_slot_set<OLD_TO_OLD>() != nullptr;
258 
259  // The free ranges map is used for filtering typed slots.
260  std::map<uint32_t, uint32_t> free_ranges;
261 
262  // Before we sweep objects on the page, we free dead array buffers which
263  // requires valid mark bits.
264  ArrayBufferTracker::FreeDead(p, marking_state_);
265 
266  Address free_start = p->area_start();
267 
268  // If we use the skip list for code space pages, we have to lock the skip
269  // list because it could be accessed concurrently by the runtime or the
270  // deoptimizer.
271  const bool rebuild_skip_list =
272  space->identity() == CODE_SPACE && p->skip_list() != nullptr;
273  SkipList* skip_list = p->skip_list();
274  if (rebuild_skip_list) {
275  skip_list->Clear();
276  }
277 
278  intptr_t live_bytes = 0;
279  intptr_t freed_bytes = 0;
280  intptr_t max_freed_bytes = 0;
281  int curr_region = -1;
282 
283  // Set the allocated_bytes counter to area_size. The free operations below
284  // will decrease the counter to actual live bytes.
285  p->ResetAllocatedBytes();
286 
287  for (auto object_and_size :
288  LiveObjectRange<kBlackObjects>(p, marking_state_->bitmap(p))) {
289  HeapObject* const object = object_and_size.first;
290  DCHECK(marking_state_->IsBlack(object));
291  Address free_end = object->address();
292  if (free_end != free_start) {
293  CHECK_GT(free_end, free_start);
294  size_t size = static_cast<size_t>(free_end - free_start);
295  if (free_space_mode == ZAP_FREE_SPACE) {
296  ZapCode(free_start, size);
297  }
298  if (free_list_mode == REBUILD_FREE_LIST) {
299  freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
300  free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
301  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
302  } else {
303  p->heap()->CreateFillerObjectAt(
304  free_start, static_cast<int>(size), ClearRecordedSlots::kNo,
305  ClearFreedMemoryMode::kClearFreedMemory);
306  }
307  if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
308  RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, free_end,
309  SlotSet::KEEP_EMPTY_BUCKETS);
310  RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, free_end,
311  SlotSet::KEEP_EMPTY_BUCKETS);
312  if (non_empty_typed_slots) {
313  free_ranges.insert(std::pair<uint32_t, uint32_t>(
314  static_cast<uint32_t>(free_start - p->address()),
315  static_cast<uint32_t>(free_end - p->address())));
316  }
317  }
318  Map map = object->synchronized_map();
319  int size = object->SizeFromMap(map);
320  live_bytes += size;
321  if (rebuild_skip_list) {
322  int new_region_start = SkipList::RegionNumber(free_end);
323  int new_region_end =
324  SkipList::RegionNumber(free_end + size - kPointerSize);
325  if (new_region_start != curr_region || new_region_end != curr_region) {
326  skip_list->AddObject(free_end, size);
327  curr_region = new_region_end;
328  }
329  }
330  free_start = free_end + size;
331  }
332 
333  if (free_start != p->area_end()) {
334  CHECK_GT(p->area_end(), free_start);
335  size_t size = static_cast<size_t>(p->area_end() - free_start);
336  if (free_space_mode == ZAP_FREE_SPACE) {
337  ZapCode(free_start, size);
338  }
339  if (free_list_mode == REBUILD_FREE_LIST) {
340  freed_bytes = reinterpret_cast<PagedSpace*>(space)->Free(
341  free_start, size, SpaceAccountingMode::kSpaceUnaccounted);
342  max_freed_bytes = Max(freed_bytes, max_freed_bytes);
343  } else {
344  p->heap()->CreateFillerObjectAt(free_start, static_cast<int>(size),
345  ClearRecordedSlots::kNo,
346  ClearFreedMemoryMode::kClearFreedMemory);
347  }
348  if (should_reduce_memory_) p->DiscardUnusedMemory(free_start, size);
349  RememberedSet<OLD_TO_NEW>::RemoveRange(p, free_start, p->area_end(),
350  SlotSet::KEEP_EMPTY_BUCKETS);
351  RememberedSet<OLD_TO_OLD>::RemoveRange(p, free_start, p->area_end(),
352  SlotSet::KEEP_EMPTY_BUCKETS);
353  if (non_empty_typed_slots) {
354  free_ranges.insert(std::pair<uint32_t, uint32_t>(
355  static_cast<uint32_t>(free_start - p->address()),
356  static_cast<uint32_t>(p->area_end() - p->address())));
357  }
358  }
359 
360  // Clear invalid typed slots after collecting all free ranges.
361  if (!free_ranges.empty()) {
362  TypedSlotSet* old_to_new = p->typed_slot_set<OLD_TO_NEW>();
363  if (old_to_new != nullptr) {
364  old_to_new->ClearInvalidSlots(free_ranges);
365  }
366  TypedSlotSet* old_to_old = p->typed_slot_set<OLD_TO_OLD>();
367  if (old_to_old != nullptr) {
368  old_to_old->ClearInvalidSlots(free_ranges);
369  }
370  }
371 
372  marking_state_->bitmap(p)->Clear();
373  if (free_list_mode == IGNORE_FREE_LIST) {
374  marking_state_->SetLiveBytes(p, 0);
375  // We did not free memory, so have to adjust allocated bytes here.
376  intptr_t freed_bytes = p->area_size() - live_bytes;
377  p->DecreaseAllocatedBytes(freed_bytes);
378  } else {
379  // Keep the old live bytes counter of the page until RefillFreeList, where
380  // the space size is refined.
381  // The allocated_bytes() counter is precisely the total size of objects.
382  DCHECK_EQ(live_bytes, p->allocated_bytes());
383  }
384  p->set_concurrent_sweeping_state(Page::kSweepingDone);
385  if (free_list_mode == IGNORE_FREE_LIST) return 0;
386  return static_cast<int>(FreeList::GuaranteedAllocatable(max_freed_bytes));
387 }
388 
389 void Sweeper::SweepSpaceFromTask(AllocationSpace identity) {
390  Page* page = nullptr;
391  while (!stop_sweeper_tasks_ &&
392  ((page = GetSweepingPageSafe(identity)) != nullptr)) {
393  ParallelSweepPage(page, identity);
394  }
395 }
396 
397 bool Sweeper::SweepSpaceIncrementallyFromTask(AllocationSpace identity) {
398  if (Page* page = GetSweepingPageSafe(identity)) {
399  ParallelSweepPage(page, identity);
400  }
401  return sweeping_list_[GetSweepSpaceIndex(identity)].empty();
402 }
403 
404 int Sweeper::ParallelSweepSpace(AllocationSpace identity,
405  int required_freed_bytes, int max_pages) {
406  int max_freed = 0;
407  int pages_freed = 0;
408  Page* page = nullptr;
409  while ((page = GetSweepingPageSafe(identity)) != nullptr) {
410  int freed = ParallelSweepPage(page, identity);
411  pages_freed += 1;
412  DCHECK_GE(freed, 0);
413  max_freed = Max(max_freed, freed);
414  if ((required_freed_bytes) > 0 && (max_freed >= required_freed_bytes))
415  return max_freed;
416  if ((max_pages > 0) && (pages_freed >= max_pages)) return max_freed;
417  }
418  return max_freed;
419 }
420 
421 int Sweeper::ParallelSweepPage(Page* page, AllocationSpace identity) {
422  // Early bailout for pages that are swept outside of the regular sweeping
423  // path. This check here avoids taking the lock first, avoiding deadlocks.
424  if (page->SweepingDone()) return 0;
425 
426  DCHECK(IsValidSweepingSpace(identity));
427  int max_freed = 0;
428  {
429  base::MutexGuard guard(page->mutex());
430  // If this page was already swept in the meantime, we can return here.
431  if (page->SweepingDone()) return 0;
432 
433  // If the page is a code page, the CodePageMemoryModificationScope changes
434  // the page protection mode from rx -> rw while sweeping.
435  CodePageMemoryModificationScope code_page_scope(page);
436 
437  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
438  page->set_concurrent_sweeping_state(Page::kSweepingInProgress);
439  const FreeSpaceTreatmentMode free_space_mode =
440  Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
441  max_freed = RawSweep(page, REBUILD_FREE_LIST, free_space_mode);
442  DCHECK(page->SweepingDone());
443 
444  // After finishing sweeping of a page we clean up its remembered set.
445  TypedSlotSet* typed_slot_set = page->typed_slot_set<OLD_TO_NEW>();
446  if (typed_slot_set) {
447  typed_slot_set->FreeToBeFreedChunks();
448  }
449  SlotSet* slot_set = page->slot_set<OLD_TO_NEW>();
450  if (slot_set) {
451  slot_set->FreeToBeFreedBuckets();
452  }
453  }
454 
455  {
456  base::MutexGuard guard(&mutex_);
457  swept_list_[GetSweepSpaceIndex(identity)].push_back(page);
458  }
459  return max_freed;
460 }
461 
462 void Sweeper::ScheduleIncrementalSweepingTask() {
463  if (!incremental_sweeper_pending_) {
464  incremental_sweeper_pending_ = true;
465  v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(heap_->isolate());
466  auto taskrunner =
467  V8::GetCurrentPlatform()->GetForegroundTaskRunner(isolate);
468  taskrunner->PostTask(
469  base::make_unique<IncrementalSweeperTask>(heap_->isolate(), this));
470  }
471 }
472 
473 void Sweeper::AddPage(AllocationSpace space, Page* page,
474  Sweeper::AddPageMode mode) {
475  base::MutexGuard guard(&mutex_);
476  DCHECK(IsValidSweepingSpace(space));
477  DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning());
478  if (mode == Sweeper::REGULAR) {
479  PrepareToBeSweptPage(space, page);
480  } else {
481  // Page has been temporarily removed from the sweeper. Accounting already
482  // happened when the page was initially added, so it is skipped here.
483  DCHECK_EQ(Sweeper::READD_TEMPORARY_REMOVED_PAGE, mode);
484  }
485  DCHECK_EQ(Page::kSweepingPending, page->concurrent_sweeping_state());
486  sweeping_list_[GetSweepSpaceIndex(space)].push_back(page);
487 }
488 
489 void Sweeper::PrepareToBeSweptPage(AllocationSpace space, Page* page) {
490  DCHECK_GE(page->area_size(),
491  static_cast<size_t>(marking_state_->live_bytes(page)));
492  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
493  page->ForAllFreeListCategories(
494  [](FreeListCategory* category) { DCHECK(!category->is_linked()); });
495  page->set_concurrent_sweeping_state(Page::kSweepingPending);
496  heap_->paged_space(space)->IncreaseAllocatedBytes(
497  marking_state_->live_bytes(page), page);
498 }
499 
500 Page* Sweeper::GetSweepingPageSafe(AllocationSpace space) {
501  base::MutexGuard guard(&mutex_);
502  DCHECK(IsValidSweepingSpace(space));
503  int space_index = GetSweepSpaceIndex(space);
504  Page* page = nullptr;
505  if (!sweeping_list_[space_index].empty()) {
506  page = sweeping_list_[space_index].front();
507  sweeping_list_[space_index].pop_front();
508  }
509  return page;
510 }
511 
512 void Sweeper::EnsurePageIsIterable(Page* page) {
513  AllocationSpace space = page->owner()->identity();
514  if (IsValidSweepingSpace(space)) {
515  SweepOrWaitUntilSweepingCompleted(page);
516  } else {
517  DCHECK(IsValidIterabilitySpace(space));
518  EnsureIterabilityCompleted();
519  }
520 }
521 
522 void Sweeper::EnsureIterabilityCompleted() {
523  if (!iterability_in_progress_) return;
524 
525  if (FLAG_concurrent_sweeping && iterability_task_started_) {
526  if (heap_->isolate()->cancelable_task_manager()->TryAbort(
527  iterability_task_id_) != TryAbortResult::kTaskAborted) {
528  iterability_task_semaphore_.Wait();
529  }
530  iterability_task_started_ = false;
531  }
532 
533  for (Page* page : iterability_list_) {
534  MakeIterable(page);
535  }
536  iterability_list_.clear();
537  iterability_in_progress_ = false;
538 }
539 
540 class Sweeper::IterabilityTask final : public CancelableTask {
541  public:
542  IterabilityTask(Isolate* isolate, Sweeper* sweeper,
543  base::Semaphore* pending_iterability_task)
544  : CancelableTask(isolate),
545  sweeper_(sweeper),
546  pending_iterability_task_(pending_iterability_task),
547  tracer_(isolate->heap()->tracer()) {}
548 
549  ~IterabilityTask() override = default;
550 
551  private:
552  void RunInternal() final {
553  TRACE_BACKGROUND_GC(tracer_,
554  GCTracer::BackgroundScope::MC_BACKGROUND_SWEEPING);
555  for (Page* page : sweeper_->iterability_list_) {
556  sweeper_->MakeIterable(page);
557  }
558  sweeper_->iterability_list_.clear();
559  pending_iterability_task_->Signal();
560  }
561 
562  Sweeper* const sweeper_;
563  base::Semaphore* const pending_iterability_task_;
564  GCTracer* const tracer_;
565 
566  DISALLOW_COPY_AND_ASSIGN(IterabilityTask);
567 };
568 
569 void Sweeper::StartIterabilityTasks() {
570  if (!iterability_in_progress_) return;
571 
572  DCHECK(!iterability_task_started_);
573  if (FLAG_concurrent_sweeping && !iterability_list_.empty()) {
574  auto task = base::make_unique<IterabilityTask>(
575  heap_->isolate(), this, &iterability_task_semaphore_);
576  iterability_task_id_ = task->id();
577  iterability_task_started_ = true;
578  V8::GetCurrentPlatform()->CallOnWorkerThread(std::move(task));
579  }
580 }
581 
582 void Sweeper::AddPageForIterability(Page* page) {
583  DCHECK(sweeping_in_progress_);
584  DCHECK(iterability_in_progress_);
585  DCHECK(!iterability_task_started_);
586  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
587  DCHECK_EQ(Page::kSweepingDone, page->concurrent_sweeping_state());
588 
589  iterability_list_.push_back(page);
590  page->set_concurrent_sweeping_state(Page::kSweepingPending);
591 }
592 
593 void Sweeper::MakeIterable(Page* page) {
594  DCHECK(IsValidIterabilitySpace(page->owner()->identity()));
595  const FreeSpaceTreatmentMode free_space_mode =
596  Heap::ShouldZapGarbage() ? ZAP_FREE_SPACE : IGNORE_FREE_SPACE;
597  RawSweep(page, IGNORE_FREE_LIST, free_space_mode);
598 }
599 
600 } // namespace internal
601 } // namespace v8
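
Note (editorial addition, not part of sweeper.cc): Sweeper::StartSweeping (source lines 149-165 above) sorts each space's sweeping list by ascending live bytes, and GetSweepingPageSafe (lines 500-510) pops pages from the front of that list, so the pages with the least live data are handed to sweeper tasks first. The standalone sketch below illustrates only that ordering; FakePage, the page names, and the byte counts are hypothetical stand-ins for v8::internal::Page and marking_state->live_bytes(page), not actual V8 API.

// Minimal sketch, assuming a simplified stand-in for Page and its live-byte count.
#include <algorithm>
#include <cstddef>
#include <cstdio>
#include <vector>

struct FakePage {        // hypothetical stand-in for v8::internal::Page
  const char* name;
  size_t live_bytes;     // what marking_state->live_bytes(page) would report
};

int main() {
  std::vector<FakePage> sweeping_list = {
      {"page-a", 48 * 1024}, {"page-b", 4 * 1024}, {"page-c", 160 * 1024}};

  // Same comparator shape as the lambda passed to std::sort in StartSweeping:
  // order pages by ascending live bytes.
  std::sort(sweeping_list.begin(), sweeping_list.end(),
            [](const FakePage& a, const FakePage& b) {
              return a.live_bytes < b.live_bytes;
            });

  for (const FakePage& p : sweeping_list) {
    std::printf("%s: %zu live bytes\n", p.name, p.live_bytes);
  }
  // Prints page-b, page-a, page-c: the emptiest page comes first.
  return 0;
}

Because the emptiest pages are swept first, the largest amounts of reclaimable memory are returned to the free lists earliest in the sweeping phase.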